Dataset columns (each row pairs a code snippet with a style context):

| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from __future__ import annotations

import math


# for calculating u value in Newton's forward interpolation formula
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
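For reference, a minimal non-interactive sketch of the same routine, using a hypothetical sample table instead of the input() prompts above (sin(x) at x = 45..60 degrees; interpolating at 52 should give roughly 0.788):

# Hypothetical equally spaced sample points; reuses ucal() from above.
import math

x = [45, 50, 55, 60]
fx = [0.7071, 0.7660, 0.8192, 0.8660]
n = len(x)

y = [[0.0] * n for _ in range(n)]
for i in range(n):
    y[i][0] = fx[i]
for i in range(1, n):  # forward difference table
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

value = 52
u = (value - x[0]) / (x[1] - x[0])
summ = y[0][0]
for i in range(1, n):
    summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
print(f"the value at {value} is {summ}")  # ~0.788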
| code_codestyle: 335 |
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
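Outside the test harness, the tokenizer can be exercised directly; a short sketch against the real microsoft/biogpt checkpoint (downloads weights on first use):

from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
ids = tokenizer("covid is a pandemic").input_ids  # starts with 2, the </s> special token
print(tokenizer.convert_ids_to_tokens(ids))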
| style_context_codestyle: 335 | label: 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)

    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
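A rough sketch of driving the parser programmatically; the TPU name and zone are hypothetical, and --debug makes the launcher print the assembled gcloud command instead of executing it (note a machine-local default accelerate config file, if present, would also be consulted):

parser = tpu_command_parser()
args = parser.parse_args(
    ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
)
tpu_command_launcher(args)  # prints: Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a ...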
| code_codestyle: 335 |
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
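A quick usage sketch with a hypothetical grid (1 = land, 0 = water); with the 8-way connectivity used above this grid has five islands:

grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Matrix(len(grid), len(grid[0]), grid)
print(g.count_islands())  # 5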
| style_context_codestyle: 335 | label: 1 |
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| code_codestyle: 335 |
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Sorting process: repeatedly swaps its value with its left/right neighbor."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)

    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())

    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()

    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
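Since oe_process makes a fixed 10 passes, the sort is only guaranteed for lists of up to 10 elements; a small usage sketch (one OS process per element, so keep inputs short):

print(*odd_even_transposition([3, 1, 4, 1, 5, 9, 2, 6]))  # 1 1 2 3 4 5 6 9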
| style_context_codestyle: 335 | label: 1 |
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
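A usage sketch with the classic CLRS price table (a piece of length i+1 costs prices[i]); all three implementations agree:

prices = [1, 5, 8, 9, 10, 17, 17, 20]
print(bottom_up_cut_rod(len(prices), prices))  # 22, e.g. cutting into lengths 2 + 6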
| code_codestyle: 335 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image) -> None:
        # read as grayscale and keep a copy of the original for display
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
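A hedged smoke test with a synthetic low-contrast image (hypothetical paths; stretch() writes into output_data/, which must exist):

import os
import numpy as np
import cv2

os.makedirs("output_data", exist_ok=True)
cv2.imwrite("input.jpg", np.random.randint(0, 128, size=(64, 64), dtype=np.uint8))

stretcher = ConstantStretch()
stretcher.stretch("input.jpg")  # remapped image lands in output_data/output.jpg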
| style_context_codestyle: 335 | label: 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False, False, False
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
# Automatically constructed
__SCREAMING_SNAKE_CASE = "dict"
__SCREAMING_SNAKE_CASE = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
__SCREAMING_SNAKE_CASE = field(default="""Audio""" , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__(self ) -> Union[str, Any]:
"""simple docstring"""
return self.pa_type
def UpperCamelCase__ (self , __a ) -> dict:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase__ = BytesIO()
sf.write(__a , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase__ = np.frombuffer(value['bytes'] , dtype=np.int16 ).astype(np.float32 ) / 32767
else:
UpperCAmelCase__ = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.float32 ) / 32767
UpperCAmelCase__ = BytesIO(bytes() )
sf.write(__a , __a , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def UpperCamelCase__ (self , __a , __a = None ) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
UpperCAmelCase__ , UpperCAmelCase__ = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
UpperCAmelCase__ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
UpperCAmelCase__ = token_per_repo_id or {}
UpperCAmelCase__ = path.split('::' )[-1]
try:
UpperCAmelCase__ = string_to_dict(__a , config.HUB_DATASETS_URL )['repo_id']
UpperCAmelCase__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase__ = None
with xopen(__a , 'rb' , use_auth_token=__a ) as f:
UpperCAmelCase__ , UpperCAmelCase__ = sf.read(__a )
else:
UpperCAmelCase__ , UpperCAmelCase__ = sf.read(__a )
UpperCAmelCase__ = array.T
if self.mono:
UpperCAmelCase__ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase__ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCamelCase__ (self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def UpperCamelCase__ (self , __a ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCAmelCase__ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase__ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase__ = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
UpperCAmelCase__ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
UpperCAmelCase__ = storage.field('bytes' )
else:
UpperCAmelCase__ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
UpperCAmelCase__ = storage.field('path' )
else:
UpperCAmelCase__ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
def UpperCamelCase__ (self , __a ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(__a ):
with xopen(__a , 'rb' ) as f:
UpperCAmelCase__ = f.read()
return bytes_
UpperCAmelCase__ = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase__ = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
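The class above mirrors the datasets Audio feature type; a hedged usage sketch with the public datasets API (the wav path is hypothetical):

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}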
| code_codestyle: 335 |
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> str:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return Swinv2Config(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = Swinv2Model(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = Swinv2ForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = Swinv2ForMaskedImageModeling(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = Swinv2ForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = Swinv2ModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , embed_dim=37 )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
UpperCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = config.window_size**2
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase__ = len(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
UpperCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape
UpperCAmelCase__ = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = Swinv2Model.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = _config_zero_init(__a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = Swinv2ForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__a )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__a )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase__ = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
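For context, a minimal inference sketch against the same real checkpoint the integration test uses:

import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

ckpt = "microsoft/swinv2-tiny-patch4-window8-256"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = Swinv2ForImageClassification.from_pretrained(ckpt)

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])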
| style_context_codestyle: 335 | label: 1 |
| code_codestyle: 335 |
from collections import deque


def tarjan(g):
    """Tarjan's algorithm for finding strongly connected components in a directed graph."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
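A usage sketch on a hypothetical digraph made of two disjoint 2-cycles:

g = create_graph(4, [(0, 1), (1, 0), (2, 3), (3, 2)])
print(tarjan(g))  # [[1, 0], [3, 2]]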
| style_context_codestyle: 335 | label: 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def UpperCamelCase_( snake_case__: Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = StableDiffusionLatentUpscalePipeline
__SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
__SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
__SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE = frozenset([] )
__SCREAMING_SNAKE_CASE = True
@property
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = 1
UpperCAmelCase__ = 4
UpperCAmelCase__ = (16, 16)
UpperCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = UNet2DConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=__a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=__a , only_cross_attention=__a , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
UpperCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
UpperCAmelCase__ = EulerDiscreteScheduler(prediction_type='sample' )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
UpperCAmelCase__ = CLIPTextModel(__a )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase__ (self , __a , __a=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(__a ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(__a )
else:
UpperCAmelCase__ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = self.get_dummy_inputs(__a )
UpperCAmelCase__ = pipe(**__a ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
UpperCAmelCase__ = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
UpperCAmelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**__a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = self.get_dummy_inputs(__a )
UpperCAmelCase__ = 2
UpperCAmelCase__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
UpperCAmelCase__ = getattr(__a , scheduler_enum.name )
UpperCAmelCase__ = scheduler_cls.from_config(pipe.scheduler.config )
UpperCAmelCase__ = pipe(**__a )[0]
outputs.append(__a )
assert check_same_shape(__a )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = torch.manual_seed(33 )
UpperCAmelCase__ = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.float16 )
pipe.to('cuda' )
UpperCAmelCase__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
upscaler.to('cuda' )
UpperCAmelCase__ = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
UpperCAmelCase__ = pipe(__a , generator=__a , output_type='latent' ).images
UpperCAmelCase__ = upscaler(
prompt=__a , image=__a , num_inference_steps=20 , guidance_scale=0 , generator=__a , output_type='np' , ).images[0]
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = torch.manual_seed(33 )
UpperCAmelCase__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
upscaler.to('cuda' )
UpperCAmelCase__ = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
UpperCAmelCase__ = upscaler(
prompt=__a , image=__a , num_inference_steps=20 , guidance_scale=0 , generator=__a , output_type='np' , ).images[0]
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
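A hedged end-to-end sketch mirroring the slow test above (requires a CUDA GPU; the checkpoints are the real ones referenced in the test):

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut, high resolution"
low_res_latents = pipe(prompt, output_type="latent").images
image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0).images[0]
image.save("astronaut_1024.png")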
| code_codestyle: 335 |
from ...configuration_utils import PretrainedConfig

TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
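A short usage sketch (real transformers API, mirroring the WTQ fine-tuning recipe from the TAPAS docs):

from transformers import TapasConfig, TapasForQuestionAnswering

# weak supervision for aggregation, as used for WTQ
config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
model = TapasForQuestionAnswering.from_pretrained("google/tapas-base", config=config)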
| 335 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = 1
UpperCAmelCase__ = 3
UpperCAmelCase__ = (32, 32)
UpperCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(__a )
@property
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
def extract(*__a , **__a ):
class lowercase :
'''simple docstring'''
def __init__(self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = torch.ones([0] )
def UpperCamelCase__ (self , __a ) -> Union[str, Any]:
"""simple docstring"""
self.pixel_values.to(__a )
return self
return Out()
return extract
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ = self.dummy_cond_unet
UpperCAmelCase__ = PNDMScheduler(skip_prk_steps=__a )
UpperCAmelCase__ = self.dummy_vae
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
UpperCAmelCase__ = 77
UpperCAmelCase__ = self.dummy_image.to(__a )
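        # rescale the dummy tensor (the usual [-1, 1] -> [0, 1] image denormalization)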
UpperCAmelCase__ = init_image / 2 + 0.5
        # make sure the PNDM scheduler skips its PRK steps here
UpperCAmelCase__ = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
UpperCAmelCase__ = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase__ = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__a , )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase__ = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__a , return_dict=__a , )[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.dummy_cond_unet
UpperCAmelCase__ = PNDMScheduler(skip_prk_steps=__a )
UpperCAmelCase__ = self.dummy_vae
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
UpperCAmelCase__ = 77
UpperCAmelCase__ = self.dummy_image.to(__a )
# put models in fp16
UpperCAmelCase__ = unet.half()
UpperCAmelCase__ = vae.half()
UpperCAmelCase__ = bert.half()
        # make sure the PNDM scheduler skips its PRK steps here
UpperCAmelCase__ = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
UpperCAmelCase__ = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = alt_pipe(
[prompt] , generator=__a , num_inference_steps=2 , output_type='np' , image=__a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase__ = init_image.resize((760, 504) )
UpperCAmelCase__ = 'BAAI/AltDiffusion'
UpperCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase__ = 'A fantasy landscape, trending on artstation'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type='np' , )
UpperCAmelCase__ = output.images[0]
UpperCAmelCase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCAmelCase__ = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCAmelCase__ = init_image.resize((768, 512) )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
UpperCAmelCase__ = 'BAAI/AltDiffusion'
UpperCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase__ = 'A fantasy landscape, trending on artstation'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type='np' , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase__ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase__ = model_name.find('patch' )
UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
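    # e.g. "xclip-base-patch32" yields patch_size = 32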
UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
UpperCAmelCase__ = 12
UpperCAmelCase__ = 10_24
UpperCAmelCase__ = 40_96
UpperCAmelCase__ = 16
UpperCAmelCase__ = 24
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = 3_36
UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
return config
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCAmelCase__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCAmelCase__ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
    # MIT (multi-frame integration transformer)
if name == "mit.positional_embedding":
UpperCAmelCase__ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(snake_case__ )
if "attn.in_proj" in key:
UpperCAmelCase__ = key.split('.' )
if key.startswith('visual' ):
UpperCAmelCase__ = key_split[3]
UpperCAmelCase__ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[
:dim
]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = rename_key(snake_case__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase__ = val.T
UpperCAmelCase__ = val
return orig_state_dict
def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]:
if num_frames == 8:
UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCAmelCase__ = 'eating_spaghetti.npy'
    elif num_frames == 32:
        UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy'
    else:
        raise ValueError(f"num_frames should be 8, 16 or 32, got {num_frames}" )
UpperCAmelCase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , )
UpperCAmelCase__ = np.load(snake_case__ )
return list(snake_case__ )
def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]:
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCAmelCase__ = model_to_url[model_name]
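    # frames per clip: 8 by default, 16 for "16-frames" checkpoints, 32 for the few-shot ("shot") checkpoints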
UpperCAmelCase__ = 8
if "16-frames" in model_name:
UpperCAmelCase__ = 16
elif "shot" in model_name:
UpperCAmelCase__ = 32
UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase__ = 'pytorch_model.bin'
gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model']
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase__ = prepare_video(snake_case__ )
UpperCAmelCase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ = model(**snake_case__ )
# Verify outputs
UpperCAmelCase__ = outputs.logits_per_video
UpperCAmelCase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , snake_case__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(snake_case__ , organization='nielsr' )
processor.push_to_hub(snake_case__ , organization='nielsr' )
slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: List[Any] , snake_case__: Union[str, Any] ) -> Tuple:
UpperCAmelCase__ = OmegaConf.load(snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
UpperCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'first_stage_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
UpperCAmelCase__ = config.model.params.first_stage_config.params
UpperCAmelCase__ = config.model.params.unet_config.params
UpperCAmelCase__ = VQModel(**snake_case__ ).eval()
vqvae.load_state_dict(snake_case__ )
UpperCAmelCase__ = UNetLDMModel(**snake_case__ ).eval()
unet.load_state_dict(snake_case__ )
UpperCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , )
UpperCAmelCase__ = LDMPipeline(snake_case__ , snake_case__ , snake_case__ )
pipeline.save_pretrained(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
_UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 335 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCamelCase = TypeVar('''T''')
class lowercase ( Generic[T] ):
'''simple docstring'''
    def __init__(self , directed: bool = True ) -> None:
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {} # dictionary of lists
        self.directed = directed
    def UpperCamelCase__ (self , source_vertex: T , destination_vertex: T ) -> GraphAdjacencyList[T]:
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__(self ) -> str:
"""simple docstring"""
return pformat(self.adj_list )
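# A minimal usage sketch for the adjacency-list class above. In this file the
# edge-adding method is named `UpperCamelCase__`; it plays the role of a
# conventional `add_edge` and returns `self`, so calls can be chained:
if __name__ == "__main__":
    demo_graph = lowercase(True )  # a directed graph
    demo_graph.UpperCamelCase__(0 , 1 ).UpperCamelCase__(1 , 2 )
    print(demo_graph )  # {0: [1], 1: [2], 2: []}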
| 335 |
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
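# Counts the ways to fill a 1 x `length` row with unit squares plus tiles of
# lengths 2, 3 and 4 (cf. Project Euler 117). `ways_number[n]` starts at 1 for
# the all-unit-squares row; each choice of the first tile's length and starting
# offset adds the count of fillings of the remaining suffix.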
def UpperCamelCase_( snake_case__: int = 50 ) -> int:
UpperCAmelCase__ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 335 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """sew-d"""
def __init__(self , __a=32 , __a=768 , __a=12 , __a=12 , __a=3072 , __a=2 , __a=512 , __a=256 , __a=True , __a=True , __a=("p2c", "c2p") , __a="layer_norm" , __a="gelu_python" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.02 , __a=1E-7 , __a=1E-5 , __a="group" , __a="gelu" , __a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a=False , __a=128 , __a=16 , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="mean" , __a=False , __a=False , __a=256 , __a=0 , __a=1 , __a=2 , **__a , ) -> str:
"""simple docstring"""
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = feat_extract_norm
UpperCAmelCase__ = feat_extract_activation
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = conv_bias
UpperCAmelCase__ = num_conv_pos_embeddings
UpperCAmelCase__ = num_conv_pos_embedding_groups
UpperCAmelCase__ = len(self.conv_dim )
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = squeeze_factor
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = position_buckets
UpperCAmelCase__ = share_att_key
UpperCAmelCase__ = relative_attention
UpperCAmelCase__ = norm_rel_ebd
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = feat_proj_dropout
UpperCAmelCase__ = final_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = feature_layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = apply_spec_augment
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ = ctc_loss_reduction
UpperCAmelCase__ = ctc_zero_infinity
# sequence classification
UpperCAmelCase__ = use_weighted_layer_sum
UpperCAmelCase__ = classifier_proj_size
@property
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 335 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """imagegpt"""
__SCREAMING_SNAKE_CASE = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__(self , __a=512 + 1 , __a=32 * 32 , __a=512 , __a=24 , __a=8 , __a=None , __a="quick_gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=1E-5 , __a=0.02 , __a=True , __a=True , __a=False , __a=False , __a=False , **__a , ) -> int:
"""simple docstring"""
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = n_positions
UpperCAmelCase__ = n_embd
UpperCAmelCase__ = n_layer
UpperCAmelCase__ = n_head
UpperCAmelCase__ = n_inner
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = resid_pdrop
UpperCAmelCase__ = embd_pdrop
UpperCAmelCase__ = attn_pdrop
UpperCAmelCase__ = layer_norm_epsilon
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scale_attn_weights
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = scale_attn_by_inverse_layer_idx
UpperCAmelCase__ = reorder_and_upcast_attn
UpperCAmelCase__ = tie_word_embeddings
super().__init__(tie_word_embeddings=__a , **__a )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
] )
def UpperCamelCase__ (self , __a , __a = 1 , __a = -1 , __a = False , __a = None , __a = 3 , __a = 32 , __a = 32 , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase__ = dict(preprocessor(images=__a , return_tensors=__a ) )
return inputs
| 335 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase_( snake_case__: int ) -> str:
for pegasus_name, hf_name in PATTERNS:
UpperCAmelCase__ = k.replace(snake_case__ , snake_case__ )
return k
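# For illustration: a TF key like "encoder/LayerNorm/gamma" becomes
# "encoder.LayerNorm.gamma" ('/' -> '.'), then "encoder_layer_norm.weight",
# and finally "encoder.layer_norm.weight", since the patterns apply in order.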
def UpperCamelCase_( snake_case__: dict , snake_case__: dict ) -> PegasusForConditionalGeneration:
UpperCAmelCase__ = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
UpperCAmelCase__ = PegasusConfig(**snake_case__ )
UpperCAmelCase__ = PegasusForConditionalGeneration(snake_case__ )
UpperCAmelCase__ = torch_model.model.state_dict()
UpperCAmelCase__ = {}
for k, v in tf_weights.items():
UpperCAmelCase__ = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
UpperCAmelCase__ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
UpperCAmelCase__ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def UpperCamelCase_( snake_case__: int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
UpperCAmelCase__ = tf.train.list_variables(snake_case__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
UpperCAmelCase__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase__ = tf.train.load_variable(snake_case__ , snake_case__ )
UpperCAmelCase__ = array
return tf_weights
def UpperCamelCase_( snake_case__: str , snake_case__: str ) -> Optional[Any]:
# save tokenizer first
UpperCAmelCase__ = Path(snake_case__ ).parent.name
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
UpperCAmelCase__ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
UpperCAmelCase__ = get_tf_weights_as_numpy(snake_case__ )
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
UpperCAmelCase__ = task_specific_params
UpperCAmelCase__ = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
UpperCAmelCase__ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """swinv2"""
__SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__(self , __a=224 , __a=4 , __a=3 , __a=96 , __a=[2, 2, 6, 2] , __a=[3, 6, 12, 24] , __a=7 , __a=4.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=0.02 , __a=1E-5 , __a=32 , **__a , ) -> Dict:
"""simple docstring"""
super().__init__(**__a )
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = len(__a )
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase__ = int(embed_dim * 2 ** (len(__a ) - 1) )
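        # per-stage window sizes of the pretrained model (all zeros by default, i.e. no pretrained window to rescale against)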
UpperCAmelCase__ = (0, 0, 0, 0)
| 335 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 13
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = 99
UpperCAmelCase__ = 384
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 37
UpperCAmelCase__ = 'gelu'
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 512
UpperCAmelCase__ = 16
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = 128
UpperCAmelCase__ = 2
UpperCAmelCase__ = 9
UpperCAmelCase__ = 1
UpperCAmelCase__ = None
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel(config=__a )
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = True
if hasattr(__a , 'use_cache' ):
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = self._prepare_for_class(__a , __a )
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' )
UpperCAmelCase__ = tf.keras.models.load_model(__a )
UpperCAmelCase__ = model(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = outputs['encoder_hidden_states']
UpperCAmelCase__ = outputs['encoder_attentions']
else:
UpperCAmelCase__ = outputs['hidden_states']
UpperCAmelCase__ = outputs['attentions']
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
def check_decoder_attentions_output(__a ):
UpperCAmelCase__ = len(__a )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a ):
UpperCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = [1, 6, 768]
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
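# --- Illustrative sketch (added; not part of the test file above) ---
# ConvBERT replaces a fraction of the standard self-attention heads with
# span-based convolution heads, which is why the shape checks above expect
# num_attention_heads / 2 attention heads per layer. A minimal version of that
# arithmetic, assuming the default head_ratio of 2:
def _expected_convbert_attention_heads(num_attention_heads: int, head_ratio: int = 2) -> int:
    # Only one group in `head_ratio` remains a plain self-attention head.
    return num_attention_heads // head_ratio

assert _expected_convbert_attention_heads(12) == 6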
| 335 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ['''XLA_PYTHON_CLIENT_MEM_FRACTION'''] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: List[Any] , snake_case__: Union[str, Any]=None ) -> List[Any]:
if rng is None:
UpperCAmelCase__ = random.Random()
UpperCAmelCase__ = 1
for dim in shape:
total_dims *= dim
UpperCAmelCase__ = []
for _ in range(snake_case__ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
UpperCAmelCase__ = np.array(snake_case__ , dtype=jnp.intaa ).reshape(snake_case__ )
return output
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: str=None ) -> Any:
UpperCAmelCase__ = ids_tensor(snake_case__ , vocab_size=2 , rng=snake_case__ )
# make sure that at least one token is attended to for each batch
UpperCAmelCase__ = 1
return attn_mask
@require_flax
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = ()
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCAmelCase__ = 2
UpperCAmelCase__ = inputs['input_ids'].shape[-1] // 2
UpperCAmelCase__ = inputs['input_ids'][:max_batch_size, :sequence_length]
UpperCAmelCase__ = jnp.ones_like(__a )
UpperCAmelCase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCAmelCase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCAmelCase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase__ = getattr(__a , __a )
UpperCAmelCase__ = pt_model_class(__a ).eval()
UpperCAmelCase__ = load_flax_weights_in_pytorch_model(__a , flax_model.params )
UpperCAmelCase__ = flax_model.generate(__a ).sequences
UpperCAmelCase__ = pt_model.generate(torch.tensor(__a , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = True
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 2
UpperCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = True
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 0.8
UpperCAmelCase__ = 10
UpperCAmelCase__ = 0.3
UpperCAmelCase__ = 1
UpperCAmelCase__ = 8
UpperCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 1
UpperCAmelCase__ = 8
UpperCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 2
UpperCAmelCase__ = 1
UpperCAmelCase__ = 8
UpperCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ = True
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ = 2
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
UpperCAmelCase__ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCAmelCase__ = 'Hello world'
UpperCAmelCase__ = tokenizer(__a , return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__a , 'do_samples' ):
model.generate(__a , do_samples=__a )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__a , 'foo' ):
UpperCAmelCase__ = {'foo': 'bar'}
model.generate(__a , **__a )
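# --- Illustrative sketch (added; not part of the mixin above) ---
# The tests above repeatedly compare eager `model.generate` with a
# jit-compiled version. A standalone sketch of that pattern; the 'gpt2'
# checkpoint is an assumption, any Flax causal LM would do.
def _jit_generation_sketch():
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    model = FlaxAutoModelForCausalLM.from_pretrained('gpt2')
    if model.config.pad_token_id is None:
        model.config.pad_token_id = model.config.eos_token_id  # same hack as in the mixin above
    input_ids = tokenizer('Hello world', return_tensors='np').input_ids
    # Greedy decoding is deterministic, so eager and jitted outputs must match.
    eager_sequences = model.generate(input_ids).sequences
    jit_sequences = jit(model.generate)(input_ids).sequences
    assert eager_sequences.tolist() == jit_sequences.tolist()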
| 335 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , **__a ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def UpperCamelCase__ (self , **__a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase__ = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
UpperCAmelCase__ = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
UpperCAmelCase__ = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase__ = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase__ = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase__ = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
UpperCAmelCase__ = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
UpperCAmelCase__ = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> List[str]:
"""simple docstring"""
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
def UpperCamelCase__ (self , __a , __a=64 , __a = 0 , __a = 512 / 1500 , __a = 32 , __a = 1 , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = load_image(__a )
UpperCAmelCase__ = self.image_processor.size['longest_edge']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCAmelCase__ = self.image_processor(images=__a , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase__ = self.get_inference_context()
with inference_context():
UpperCAmelCase__ = self._ensure_tensor_on_device(__a , device=self.device )
UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
UpperCAmelCase__ = image_embeddings
UpperCAmelCase__ = grid_points.shape[1]
UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , __a , __a ):
UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase__ = input_labels[:, i : i + points_per_batch]
UpperCAmelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase__ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = model_inputs.pop('input_boxes' )
UpperCAmelCase__ = model_inputs.pop('is_last' )
UpperCAmelCase__ = model_inputs.pop('original_sizes' ).tolist()
UpperCAmelCase__ = model_inputs.pop('reshaped_input_sizes' ).tolist()
UpperCAmelCase__ = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCAmelCase__ = model_outputs['pred_masks']
UpperCAmelCase__ = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCAmelCase__ = model_outputs['iou_scores']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase__ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation(
__a , __a , __a , __a )
UpperCAmelCase__ = defaultdict(__a )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__a )
UpperCAmelCase__ = {}
if output_rle_mask:
UpperCAmelCase__ = rle_mask
if output_bboxes_mask:
UpperCAmelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 335 | 1 |
from __future__ import annotations
def UpperCamelCase_( snake_case__: int , snake_case__: int ) -> list[list[int]]:
UpperCAmelCase__ = []
create_all_state(1 , snake_case__ , snake_case__ , [] , snake_case__ )
return result
def UpperCamelCase_( snake_case__: int , snake_case__: int , snake_case__: int , snake_case__: list[int] , snake_case__: list[list[int]] , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(snake_case__ , total_number - level + 2 ):
current_list.append(snake_case__ )
create_all_state(i + 1 , snake_case__ , level - 1 , snake_case__ , snake_case__ )
current_list.pop()
def UpperCamelCase_( snake_case__: list[list[int]] ) -> None:
for i in total_list:
print(*snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = 4
_UpperCamelCase = 2
_UpperCamelCase = generate_all_combinations(n, k)
print_all_state(total_list)
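# --- Worked example (added, illustrative) ---
# For n=4, k=2 the backtracking above prints the C(4,2)=6 combinations in
# lexicographic order: 1 2, 1 3, 1 4, 2 3, 2 4, 3 4. The standard library
# produces the same result and can serve as a cross-check:
# import itertools
# assert [list(c) for c in itertools.combinations(range(1, 5), 2)] == generate_all_combinations(4, 2)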
| 335 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__SCREAMING_SNAKE_CASE = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__SCREAMING_SNAKE_CASE = field(
default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
__SCREAMING_SNAKE_CASE = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__SCREAMING_SNAKE_CASE = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__SCREAMING_SNAKE_CASE = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__SCREAMING_SNAKE_CASE = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Sequence length used for training."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Training seed."""} )
__SCREAMING_SNAKE_CASE = field(
default=1024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Sample from the language model's output distribution."""} )
__SCREAMING_SNAKE_CASE = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__SCREAMING_SNAKE_CASE = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__SCREAMING_SNAKE_CASE = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__SCREAMING_SNAKE_CASE = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(
default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(
default=32768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__SCREAMING_SNAKE_CASE = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
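# --- Illustrative note (added) ---
# These dataclasses are meant to be consumed with HfArgumentParser, which turns
# every `field` into a CLI flag. A sketch (the class name `TrainingArguments`
# is assumed from the upstream CodeParrot scripts; everything above was
# anonymized to `lowercase` here):
# from transformers import HfArgumentParser
# parser = HfArgumentParser(TrainingArguments)
# (training_args,) = parser.parse_args_into_dataclasses()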
| 335 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_UpperCamelCase = logging.getLogger(__name__)
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: Any ) -> Any:
# replace any previously saved checkpoint in the output dir, then save the model
if os.path.exists(snake_case__ ):
if os.path.exists(os.path.join(snake_case__ , 'config.json' ) ) and os.path.isfile(
os.path.join(snake_case__ , 'config.json' ) ):
os.remove(os.path.join(snake_case__ , 'config.json' ) )
if os.path.exists(os.path.join(snake_case__ , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(snake_case__ , 'pytorch_model.bin' ) ):
os.remove(os.path.join(snake_case__ , 'pytorch_model.bin' ) )
else:
os.makedirs(snake_case__ )
model.save_pretrained(snake_case__ )
def UpperCamelCase_( snake_case__: Tuple , snake_case__: Dict=False ) -> List[Any]:
UpperCAmelCase__ = 2
if unlogit:
UpperCAmelCase__ = torch.pow(snake_case__ , snake_case__ )
UpperCAmelCase__ = p * torch.log(snake_case__ )
UpperCAmelCase__ = 0
return -plogp.sum(dim=-1 )
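# Quick sanity check of the entropy helper above (added, illustrative). With
# `unlogit` left False the input is treated as probabilities directly; a
# uniform distribution over 4 outcomes gives the maximum entropy log(4):
# import torch
# p = torch.full((4,), 0.25)
# entropy(p)  # -> tensor(1.3863), i.e. log(4)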
def UpperCamelCase_( snake_case__: Optional[Any] ) -> str:
logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(snake_case__ ) ) ) )
for row in range(len(snake_case__ ) ):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data ) )
else:
logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data ) )
def UpperCamelCase_( snake_case__: str , snake_case__: int , snake_case__: List[Any] , snake_case__: List[str]=True , snake_case__: Dict=True , snake_case__: Optional[Any]=None , snake_case__: Any=False ) -> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase__ = torch.zeros(snake_case__ , snake_case__ ).to(args.device )
UpperCAmelCase__ = torch.zeros(snake_case__ , snake_case__ ).to(args.device )
if head_mask is None:
UpperCAmelCase__ = torch.ones(snake_case__ , snake_case__ ).to(args.device )
head_mask.requires_grad_(requires_grad=snake_case__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
UpperCAmelCase__ = None
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = 0.0
for step, inputs in enumerate(tqdm(snake_case__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase__ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase__) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase__ = model(snake_case__ , labels=snake_case__ , head_mask=snake_case__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(snake_case__ ):
UpperCAmelCase__ = entropy(attn.detach() , snake_case__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(snake_case__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase__ = 2
UpperCAmelCase__ = torch.pow(torch.pow(snake_case__ , snake_case__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(snake_case__ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(snake_case__ )
logger.info('Head ranked by importance scores' )
UpperCAmelCase__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase__ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase__ = head_ranks.view_as(snake_case__ )
print_ad_tensor(snake_case__ )
return attn_entropy, head_importance, total_loss
def UpperCamelCase_( snake_case__: Any , snake_case__: Union[str, Any] , snake_case__: List[str] ) -> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = compute_heads_importance(snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ )
UpperCAmelCase__ = 1 / loss # instead of downstream score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , snake_case__ , original_score * args.masking_threshold )
UpperCAmelCase__ = torch.ones_like(snake_case__ )
UpperCAmelCase__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase__ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase__ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase__ = float('Inf' )
UpperCAmelCase__ = head_importance.view(-1 ).sort()[1]
if len(snake_case__ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
UpperCAmelCase__ = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase__ = new_head_mask.view(-1 )
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = new_head_mask.view_as(snake_case__ )
UpperCAmelCase__ = new_head_mask.clone().detach()
print_ad_tensor(snake_case__ )
# Compute metric and head importance again
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = compute_heads_importance(
snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ , head_mask=snake_case__ )
UpperCAmelCase__ = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percent)' , snake_case__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(snake_case__ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def UpperCamelCase_( snake_case__: int , snake_case__: Optional[int] , snake_case__: Optional[int] , snake_case__: Optional[int] ) -> Union[str, Any]:
UpperCAmelCase__ = datetime.now()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = compute_heads_importance(
snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ , compute_importance=snake_case__ , head_mask=snake_case__ )
UpperCAmelCase__ = 1 / loss
UpperCAmelCase__ = datetime.now() - before_time
UpperCAmelCase__ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(snake_case__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ = [
v,
]
assert sum(len(snake_case__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(snake_case__ )
UpperCAmelCase__ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ = datetime.now()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = compute_heads_importance(
snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ , compute_importance=snake_case__ , head_mask=snake_case__ , actually_pruned=snake_case__ , )
UpperCAmelCase__ = 1 / loss
UpperCAmelCase__ = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , snake_case__ , snake_case__ , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , snake_case__ , snake_case__ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_00 )
save_model(snake_case__ , args.output_dir )
def UpperCamelCase_( ) -> List[str]:
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=snake_case__ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=snake_case__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=snake_case__ , type=snake_case__ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=snake_case__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=snake_case__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=snake_case__ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=snake_case__ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=snake_case__ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=snake_case__ , help='Batch size.' )
parser.add_argument('--seed' , type=snake_case__ , default=42 )
parser.add_argument('--local_rank' , type=snake_case__ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=snake_case__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=snake_case__ , default='' , help='Can be used for distant debugging.' )
UpperCAmelCase__ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase__ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
UpperCAmelCase__ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase__ = torch.device('cuda' , args.local_rank )
UpperCAmelCase__ = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase__ = nn.parallel.DistributedDataParallel(
snake_case__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case__ )
elif args.n_gpu > 1:
UpperCAmelCase__ = nn.DataParallel(snake_case__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=snake_case__ )
torch.save(snake_case__ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , snake_case__ )
# Prepare dataset
UpperCAmelCase__ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase__ = (torch.from_numpy(snake_case__ ),)
UpperCAmelCase__ = TensorDataset(*snake_case__ )
UpperCAmelCase__ = RandomSampler(snake_case__ )
UpperCAmelCase__ = DataLoader(snake_case__ , sampler=snake_case__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case__ , snake_case__ , snake_case__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase__ = mask_heads(snake_case__ , snake_case__ , snake_case__ )
prune_heads(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
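# --- Illustrative CLI usage (added; the script and file names are placeholders) ---
# python prune_gpt2_heads.py \
#     --model_name_or_path gpt2 \
#     --data_dir ./pretokenized_ids.txt \
#     --output_dir ./pruned-gpt2 \
#     --try_masking --masking_threshold 0.9 --masking_amount 0.1
# The data file is read with np.loadtxt above, i.e. whitespace-separated token
# ids, one sequence per row.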
| 335 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_choices
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained('roberta-base' , from_pt=__a )
UpperCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
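# --- Illustrative sketch (added; not part of the test above) ---
# The slow test loads PyTorch weights into Flax with `from_pt=True`; the same
# round trip stands alone as:
def _flax_roberta_from_pt_sketch():
    import numpy as np
    from transformers import FlaxRobertaModel
    model = FlaxRobertaModel.from_pretrained('roberta-base', from_pt=True)
    outputs = model(np.ones((1, 1), dtype='i4'))
    return outputs.last_hidden_state.shape  # expected: (1, 1, 768)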
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCamelCase = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
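# --- Illustrative sketch of the lazy-import pattern above (added) ---
# At runtime the module object is swapped for a _LazyModule, so the torch/TF
# branches are only imported on first attribute access. A toy version of the
# idea (resolution only works once attached to a real package):
import importlib
import types

class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_submodule):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule  # e.g. {'XLMModel': 'modeling_xlm'}

    def __getattr__(self, attr):
        # Import the owning submodule on demand, then fetch the attribute from it.
        submodule = importlib.import_module('.' + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)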
| 335 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*__a , **__a )
| 335 | 1 |
import requests
from bs4 import BeautifulSoup
def UpperCamelCase_( snake_case__: str = "AAPL" ) -> str:
UpperCAmelCase__ = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
UpperCAmelCase__ = BeautifulSoup(requests.get(snake_case__ ).text , 'html.parser' )
UpperCAmelCase__ = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
_UpperCamelCase = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
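# Illustrative note (added): the MAPPING keys above are substring-matched
# against fairseq weight names, and a '*' in the mapped key is filled with the
# layer index parsed from the original name. For example,
# 'encoder.layers.3.self_attn.k_proj.weight' matches 'self_attn.k_proj' and
# maps to 'encoder.layers.3.attention.k_proj.weight' via
# 'encoder.layers.*.attention.k_proj'.replace('*', '3').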
def UpperCamelCase_( snake_case__: List[str] , snake_case__: List[Any] , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: Optional[int] ) -> List[Any]:
for attribute in key.split('.' ):
UpperCAmelCase__ = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
UpperCAmelCase__ = getattr(snake_case__ , snake_case__ ).shape
else:
UpperCAmelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCamelCase_( snake_case__: int , snake_case__: List[Any] ) -> List[str]:
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , )
UpperCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
UpperCAmelCase__ = name.split(snake_case__ )[0].split('.' )[-2]
UpperCAmelCase__ = mapped_key.replace('*' , snake_case__ )
if "weight_g" in name:
UpperCAmelCase__ = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase__ = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = 'weight'
else:
UpperCAmelCase__ = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f"Unused weights: {unused_weights}" )
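# Illustrative walk-through of the wildcard substitution above (hypothetical key):
#   fairseq name : "encoder.layers.3.self_attn.k_proj.weight"
#   matched key  : "self_attn.k_proj" -> "encoder.layers.*.attention.k_proj"
#   The "*" is filled with the layer index ("3") parsed from the fairseq name,
#   yielding "encoder.layers.3.attention.k_proj" with weight_type "weight".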
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: Optional[int] , snake_case__: str ) -> Union[str, Any]:
UpperCAmelCase__ = full_name.split('conv_layers.' )[-1]
UpperCAmelCase__ = name.split('.' )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCAmelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCAmelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCAmelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCAmelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def UpperCamelCase_( snake_case__: List[str] , snake_case__: Optional[int] , snake_case__: int=None ) -> int:
# load the pre-trained checkpoints
UpperCAmelCase__ = torch.load(snake_case__ )
UpperCAmelCase__ = WavLMConfigOrig(checkpoint['cfg'] )
UpperCAmelCase__ = WavLMOrig(snake_case__ )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
UpperCAmelCase__ = WavLMConfig.from_pretrained(snake_case__ )
else:
UpperCAmelCase__ = WavLMConfig()
UpperCAmelCase__ = WavLMModel(snake_case__ )
recursively_load_weights(snake_case__ , snake_case__ )
hf_wavlm.save_pretrained(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_UpperCamelCase = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
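    # Example invocation (script name and paths are illustrative):
    #   python convert_wavlm_checkpoint.py \
    #       --checkpoint_path ./WavLM-Base.pt \
    #       --pytorch_dump_folder_path ./wavlm-base-converted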
| 335 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCAmelCase__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sgugger/tiny-distilbert-classification'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
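# Minimal standalone sketch of the benchmark API exercised above
# (model id and sizes are illustrative, taken from the tests):
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()
#   print(results.time_inference_result)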
| 335 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = 0
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop('image_processor_type' )
UpperCAmelCase__ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__a , 'clip-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('clip-base' )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__a , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a , revision='aaaaaa' )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
__a , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
with self.assertRaises(__a ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
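        # Sketch of the remote-code path exercised here (repo id from this test):
        #   proc = AutoImageProcessor.from_pretrained(
        #       "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        #   )
        #   assert proc.__class__.__name__ == "NewImageProcessor"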
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('custom' , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
UpperCAmelCase__ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
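        # Registration sketch (the custom classes are the test doubles imported above):
        #   AutoConfig.register("custom", CustomConfig)
        #   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
        #   AutoImageProcessor.from_pretrained(saved_dir)  # now resolves to CustomImageProcessor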
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
try:
AutoConfig.register('custom' , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(__a , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 335 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
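# Usage sketch, assuming this module is re-exported as a package-level ``utils``:
#   from <package>.utils import set_seed, send_to_device
#   set_seed(42)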
| 335 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 13
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = 99
UpperCAmelCase__ = 384
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 37
UpperCAmelCase__ = 'gelu'
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 512
UpperCAmelCase__ = 16
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = 128
UpperCAmelCase__ = 2
UpperCAmelCase__ = 9
UpperCAmelCase__ = 1
UpperCAmelCase__ = None
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel(config=__a )
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = True
if hasattr(__a , 'use_cache' ):
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = self._prepare_for_class(__a , __a )
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' )
UpperCAmelCase__ = tf.keras.models.load_model(__a )
UpperCAmelCase__ = model(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = outputs['encoder_hidden_states']
UpperCAmelCase__ = outputs['encoder_attentions']
else:
UpperCAmelCase__ = outputs['hidden_states']
UpperCAmelCase__ = outputs['attentions']
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
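        # Note: ConvBERT's mixed attention (head_ratio=2 by default) hands half of the
        # heads to span-based dynamic convolution, hence num_attention_heads / 2 in the
        # expected attention shape above.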
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
def check_decoder_attentions_output(__a ):
UpperCAmelCase__ = len(__a )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a ):
UpperCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = [1, 6, 768]
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 335 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__(self , *,
__a = 4 , __a = 768 , __a , __a , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.Linear(__a , __a )
# parameters for encoder hidden states
UpperCAmelCase__ = clip_extra_context_tokens
UpperCAmelCase__ = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.LayerNorm(__a )
def UpperCamelCase__ (self , *, __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCAmelCase__ = image_embeddings.shape[0]
UpperCAmelCase__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCAmelCase__ = classifier_free_guidance_embeddings.expand(
__a , -1 )
UpperCAmelCase__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCAmelCase__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCAmelCase__ = self.embedding_proj(__a )
UpperCAmelCase__ = self.clip_image_embeddings_project_to_time_embeddings(__a )
UpperCAmelCase__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCAmelCase__ = self.clip_extra_context_tokens_proj(__a )
UpperCAmelCase__ = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
UpperCAmelCase__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCAmelCase__ = self.encoder_hidden_states_proj(__a )
UpperCAmelCase__ = self.text_encoder_hidden_states_norm(__a )
UpperCAmelCase__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
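if __name__ == "__main__":
    # Shape sketch of the extra-context-token projection above, assuming illustrative
    # sizes: batch=2, cross_attention_dim=768, clip_extra_context_tokens=4.
    dummy = torch.randn(2, 4 * 768)   # output of the extra-context-tokens linear layer
    dummy = dummy.reshape(2, -1, 4)   # -> (batch, cross_attention_dim, n_extra_tokens)
    dummy = dummy.permute(0, 2, 1)    # -> (batch, n_extra_tokens, cross_attention_dim)
    assert dummy.shape == (2, 4, 768)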
| 335 | 1 |
import mpmath # for roots of unity
import numpy as np
class lowercase :
'''simple docstring'''
def __init__(self , __a=None , __a=None ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = list(poly_a or [0] )[:]
UpperCAmelCase__ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
UpperCAmelCase__ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
UpperCAmelCase__ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
UpperCAmelCase__ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
        # A complex root used for the Fourier transform
UpperCAmelCase__ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
UpperCAmelCase__ = self.__multiply()
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(__a ) <= 1:
return dft[0]
#
UpperCAmelCase__ = self.c_max_length // 2
while next_ncol > 0:
UpperCAmelCase__ = [[] for i in range(__a )]
UpperCAmelCase__ = self.root**next_ncol
# First half of next step
UpperCAmelCase__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__a ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
UpperCAmelCase__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__a ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
UpperCAmelCase__ = new_dft
UpperCAmelCase__ = next_ncol // 2
return dft[0]
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.__dft('A' )
UpperCAmelCase__ = self.__dft('B' )
UpperCAmelCase__ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
UpperCAmelCase__ = 2
while next_ncol <= self.c_max_length:
UpperCAmelCase__ = [[] for i in range(__a )]
UpperCAmelCase__ = self.root ** (next_ncol // 2)
UpperCAmelCase__ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
UpperCAmelCase__ = new_inverse_c
next_ncol *= 2
# Unpack
UpperCAmelCase__ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ) -> str:
"""simple docstring"""
        UpperCAmelCase__ = 'A = ' + ' + '.join(
            F"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A] ) )
        UpperCAmelCase__ = 'B = ' + ' + '.join(
            F"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B] ) )
        UpperCAmelCase__ = 'A*B = ' + ' + '.join(
            F"{coef}*x^{i}" for i, coef in enumerate(self.product ) )
return F"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
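    # Quick usage sketch (class name per the obfuscated definition above):
    #   print(lowercase([1, 2, 3], [3, 4]))
    #   # (1 + 2x + 3x^2) * (3 + 4x) = 3 + 10x + 17x^2 + 12x^3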
| 335 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BioGptTokenizer
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
UpperCAmelCase__ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(__a ) )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = 'lower newer'
return input_text, output_text
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ = 'lower'
UpperCAmelCase__ = ['low', 'er</w>']
UpperCAmelCase__ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase__ = tokens + ['<unk>']
UpperCAmelCase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
UpperCAmelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 335 | 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_UpperCamelCase = 0
_UpperCamelCase = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free path cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_UpperCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_UpperCamelCase = tuple[int, int]
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
UpperCAmelCase__ = pos_x
UpperCAmelCase__ = pos_y
UpperCAmelCase__ = (pos_y, pos_x)
UpperCAmelCase__ = goal_x
UpperCAmelCase__ = goal_y
UpperCAmelCase__ = g_cost
UpperCAmelCase__ = parent
UpperCAmelCase__ = self.calculate_heuristic()
UpperCAmelCase__ = self.g_cost + self.h_cost
def UpperCamelCase__ (self ) -> float:
"""simple docstring"""
UpperCAmelCase__ = self.pos_x - self.goal_x
UpperCAmelCase__ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__a ) + abs(__a )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self , __a ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
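# Worked example of calculate_heuristic: a node at (y=0, x=0) with goal (y=6, x=6)
# has dx = dy = -6, so HEURISTIC == 1 gives the Manhattan distance |dx| + |dy| = 12,
# while the Euclidean branch gives sqrt(dx**2 + dy**2) = sqrt(72) ~ 8.49.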
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a ) -> str:
"""simple docstring"""
UpperCAmelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __a )
UpperCAmelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , __a )
UpperCAmelCase__ = [self.start]
UpperCAmelCase__ = []
UpperCAmelCase__ = False
def UpperCamelCase__ (self ) -> list[TPosition]:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase__ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__a )
self.closed_nodes.append(__a )
UpperCAmelCase__ = self.get_successors(__a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__a )
else:
# retrieve the best current path
UpperCAmelCase__ = self.open_nodes.pop(self.open_nodes.index(__a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__a )
else:
self.open_nodes.append(__a )
return [self.start.pos]
def UpperCamelCase__ (self , __a ) -> list[Node]:
"""simple docstring"""
UpperCAmelCase__ = []
for action in delta:
UpperCAmelCase__ = parent.pos_x + action[1]
UpperCAmelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__a , __a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __a , ) )
return successors
def UpperCamelCase__ (self , __a ) -> list[TPosition]:
"""simple docstring"""
UpperCAmelCase__ = node
UpperCAmelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase__ = current_node.parent
path.reverse()
return path
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = AStar(__a , __a )
UpperCAmelCase__ = AStar(__a , __a )
UpperCAmelCase__ = False
def UpperCamelCase__ (self ) -> list[TPosition]:
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase__ = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase__ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__a , __a )
self.fwd_astar.closed_nodes.append(__a )
self.bwd_astar.closed_nodes.append(__a )
UpperCAmelCase__ = current_bwd_node
UpperCAmelCase__ = current_fwd_node
UpperCAmelCase__ = {
self.fwd_astar: self.fwd_astar.get_successors(__a ),
self.bwd_astar: self.bwd_astar.get_successors(__a ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__a )
else:
# retrieve the best current path
UpperCAmelCase__ = astar.open_nodes.pop(
astar.open_nodes.index(__a ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__a )
else:
astar.open_nodes.append(__a )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ (self , __a , __a ) -> list[TPosition]:
"""simple docstring"""
UpperCAmelCase__ = self.fwd_astar.retrace_path(__a )
UpperCAmelCase__ = self.bwd_astar.retrace_path(__a )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_UpperCamelCase = (0, 0)
_UpperCamelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_UpperCamelCase = time.time()
_UpperCamelCase = AStar(init, goal)
_UpperCamelCase = a_star.search()
_UpperCamelCase = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
    _UpperCamelCase = time.time()
    _UpperCamelCase = BidirectionalAStar(init, goal)
    _UpperCamelCase = bd_a_star.search()
    _UpperCamelCase = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 335 |
class lowercase : # Public class to implement a graph
'''simple docstring'''
def __init__(self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = row
UpperCAmelCase__ = col
UpperCAmelCase__ = graph
def UpperCamelCase__ (self , __a , __a , __a ) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCamelCase__ (self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCAmelCase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCAmelCase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __a )
def UpperCamelCase__ (self ) -> int: # And finally, count all islands.
"""simple docstring"""
UpperCAmelCase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCAmelCase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__a , __a , __a )
count += 1
return count
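if __name__ == "__main__":
    # Usage sketch: count 8-connected islands in a small illustrative grid.
    # Island 1: (0,0)-(0,1)-(1,2)-(2,2) via diagonal moves; island 2: (2,0).
    graph = [[1, 1, 0], [0, 0, 1], [1, 0, 1]]
    g = lowercase(3, 3, graph)
    print(g.UpperCamelCase__())  # expected: 2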
| 335 | 1 |
from __future__ import annotations
def UpperCamelCase_( snake_case__: list[int] , snake_case__: list[int] , snake_case__: int ) -> tuple[float, list[float]]:
UpperCAmelCase__ = list(range(len(snake_case__ ) ) )
UpperCAmelCase__ = [v / w for v, w in zip(snake_case__ , snake_case__ )]
    index.sort(key=lambda snake_case__ : ratio[snake_case__] , reverse=snake_case__ )
UpperCAmelCase__ = 0
UpperCAmelCase__ = [0] * len(snake_case__ )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase__ = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase__ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
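    # Usage sketch (argument order per the signature above: values, weights, capacity):
    #   UpperCamelCase_([60, 100, 120], [10, 20, 30], 50)
    #   # expected: (240.0, [1, 1, 2/3]) -- items sorted by value/weight ratio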
| 335 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase = Lock()
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Dict , snake_case__: Any ) -> str:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
UpperCAmelCase__ = []
UpperCAmelCase__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
UpperCAmelCase__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCamelCase_( ) -> Dict:
UpperCAmelCase__ = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
UpperCAmelCase__ = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main()
| 335 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=3 , __a=32 , __a=3 , __a=10 , __a=[8, 16, 32, 64] , __a=[1, 1, 2, 1] , __a=True , __a=True , __a="relu" , __a=3 , __a=None , __a=["stage2", "stage3", "stage4"] , __a=[2, 3, 4] , __a=1 , ) -> int:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embeddings_size
UpperCAmelCase__ = hidden_sizes
UpperCAmelCase__ = depths
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = scope
UpperCAmelCase__ = len(__a )
UpperCAmelCase__ = out_features
UpperCAmelCase__ = out_indices
UpperCAmelCase__ = num_groups
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = BitModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = BitForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = BitBackbone(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase__ = None
UpperCAmelCase__ = BitBackbone(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = BitModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , has_text_modality=__a )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
return
@unittest.skip(reason='Bit does not output attentions' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__a )
for name, module in model.named_modules():
if isinstance(__a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
def check_hidden_states_output(__a , __a , __a ):
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase__ = layer_type
UpperCAmelCase__ = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
check_hidden_states_output(__a , __a , __a )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = BitModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
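# 000000039769.png is the standard COCO fixture (two cats lying on a couch)
# used across the vision test-suite.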
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__a )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__a )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase__ = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@require_torch
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (BitBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = BitConfig
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = BitModelTester(self )
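    # BackboneTesterMixin supplies the shared backbone checks (out_features
    # handling, channel counts, feature-map shapes), so only setUp is needed here.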
| 335 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    '''simple docstring'''
    def __init__(self ) -> None:
        """simple docstring"""
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self , input_image ):
        """simple docstring"""
        self.img = cv2.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg' , self.img )
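    # stretch() is essentially histogram equalisation: sk accumulates the
    # empirical CDF of the grey levels and last_list maps level v to
    # round((L - 1) * CDF(v)), which is then applied pixel-wise.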
    def plot_histogram(self ):
        """simple docstring"""
        plt.hist(self.img.ravel() , 256 , [0, 256] )
    def show_image(self ):
        """simple docstring"""
        cv2.imshow('Output-Image' , self.img )
        cv2.imshow('Input-Image' , self.original_image )
        cv2.waitKey(5000 )
        cv2.destroyAllWindows()
if __name__ == "__main__":
_UpperCamelCase = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 335 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_UpperCamelCase = logging.getLogger()
def _dump_articles( path: Path , articles: list ) -> None:
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
_UpperCamelCase = '''patrickvonplaten/t5-tiny-random'''
_UpperCamelCase = '''sshleifer/bart-tiny-random'''
_UpperCamelCase = '''sshleifer/tiny-mbart'''
_UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
UpperCAmelCase__ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
UpperCAmelCase__ = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(__a , __a )
UpperCAmelCase__ = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
UpperCAmelCase__ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
UpperCAmelCase__ = F"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
with patch.object(__a , 'argv' , __a ):
run_generate()
assert Path(__a ).exists()
# os.remove(Path(output_file_name))
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
self.run_eval_tester(__a )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def UpperCamelCase__ (self , __a ) -> Dict:
"""simple docstring"""
self.run_eval_tester(__a )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def UpperCamelCase__ (self , __a ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
UpperCAmelCase__ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
UpperCAmelCase__ = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
UpperCAmelCase__ = Path(self.get_auto_remove_tmp_dir() )
UpperCAmelCase__ = str(tmp_dir / 'scores.json' )
UpperCAmelCase__ = str(tmp_dir / 'val.target' )
_dump_articles(__a , text['en'] )
_dump_articles(__a , text['de'] )
UpperCAmelCase__ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
UpperCAmelCase__ = F"\n run_eval_search.py\n {model}\n {str(__a )}\n {str(__a )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(__a , 'argv' , __a ):
with CaptureStdout() as cs:
run_search()
UpperCAmelCase__ = [' num_beams | length_penalty', model, 'Best score args']
UpperCAmelCase__ = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(__a )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__a ).exists()
os.remove(Path(__a ) )
| 335 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> str:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
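        # Each patch-merging stage halves the spatial resolution, so the token
        # count shrinks 4x per stage (the 4 ** (len(depths) - 1) divisor) while
        # the channel width doubles (the 2 ** (len(depths) - 1) factor).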
def UpperCamelCase__ (self , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = SwinvaForMaskedImageModeling(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = SwinvaForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , embed_dim=37 )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
UpperCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = config.window_size**2
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase__ = len(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
UpperCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape
UpperCAmelCase__ = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = SwinvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = _config_zero_init(__a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__a )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__a )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase__ = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 335 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_UpperCamelCase = sys.version_info >= (3, 10)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
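# dataclasses reject mutable defaults such as [] at class-definition time, so
# list-valued fields are routed through default_factory instead.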
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """titi"""
__SCREAMING_SNAKE_CASE = """toto"""
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """titi"""
__SCREAMING_SNAKE_CASE = """toto"""
__SCREAMING_SNAKE_CASE = 42
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "toto"
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = BasicEnum(self.foo )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "toto"
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = list_field(default=[] )
__SCREAMING_SNAKE_CASE = list_field(default=[] )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list_field(default=[] )
__SCREAMING_SNAKE_CASE = list_field(default=[1, 2, 3] )
__SCREAMING_SNAKE_CASE = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
__SCREAMING_SNAKE_CASE = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field()
__SCREAMING_SNAKE_CASE = field()
__SCREAMING_SNAKE_CASE = field()
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = field()
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default="""toto""" , metadata={"""help""": """help message"""} )
__SCREAMING_SNAKE_CASE = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = list_field(default=[] )
__SCREAMING_SNAKE_CASE = list_field(default=[] )
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a , __a ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCAmelCase__ = {k: v for k, v in vars(__a ).items() if k != 'container'}
UpperCAmelCase__ = {k: v for k, v in vars(__a ).items() if k != 'container'}
            # Choices with mixed types have a custom function as "type",
            # so we need to compare the results directly for equality
if xx.get('choices' , __a ) and yy.get('choices' , __a ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](__a ) , yy['type'](__a ) )
del xx["type"], yy["type"]
self.assertEqual(__a , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , required=__a )
expected.add_argument('--bar' , type=__a , required=__a )
expected.add_argument('--baz' , type=__a , required=__a )
expected.add_argument('--flag' , type=__a , default=__a , const=__a , nargs='?' )
self.argparsersEqual(__a , __a )
UpperCAmelCase__ = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((UpperCAmelCase__) , ) = parser.parse_args_into_dataclasses(__a , look_for_args_file=__a )
self.assertFalse(example.flag )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__a )
expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
self.argparsersEqual(__a , __a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , default=__a , const=__a , nargs='?' )
expected.add_argument('--baz' , type=__a , default=__a , const=__a , nargs='?' )
        # A boolean no_* argument always has to come after its "default: True" regular counterpart
        # and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__a , dest='baz' )
expected.add_argument('--opt' , type=__a , default=__a )
UpperCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
UpperCAmelCase__ = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
UpperCAmelCase__ = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
UpperCAmelCase__ = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
UpperCAmelCase__ = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
UpperCAmelCase__ = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
UpperCAmelCase__ = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__a , __a )
UpperCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCAmelCase__ = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase__ = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "toto"
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__a , __a )
UpperCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCAmelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCAmelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__a )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__a )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__a )
self.argparsersEqual(__a , __a )
UpperCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCAmelCase__ = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=__a , type=__a )
expected.add_argument('--bar' , default=__a , type=__a , help='help message' )
expected.add_argument('--baz' , default=__a , type=__a )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__a )
expected.add_argument('--des' , nargs='+' , default=[] , type=__a )
UpperCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
UpperCAmelCase__ = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
UpperCAmelCase__ = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , bar=__a , baz=__a , ces=[] , des=[] ) )
UpperCAmelCase__ = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__a , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__a , required=__a )
expected.add_argument('--required_str' , type=__a , required=__a )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
self.argparsersEqual(__a , __a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , required=__a )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
expected.add_argument('--opt' , type=__a , default=__a )
expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
self.argparsersEqual(__a , __a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
UpperCAmelCase__ = parser.parse_dict(__a )[0]
UpperCAmelCase__ = BasicExample(**__a )
self.assertEqual(__a , __a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__a , parser.parse_dict , __a , allow_extra_keys=__a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = os.path.join(__a , 'temp_json' )
os.mkdir(__a )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__a , __a )
UpperCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
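            # parse_yaml_file also handles the JSON file here: yaml.safe_load
            # accepts JSON input, since YAML 1.2 is essentially a superset of JSON.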
UpperCAmelCase__ = BasicExample(**__a )
self.assertEqual(__a , __a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
UpperCAmelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = os.path.join(__a , 'temp_yaml' )
os.mkdir(__a )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__a , __a )
UpperCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
UpperCAmelCase__ = BasicExample(**__a )
self.assertEqual(__a , __a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser(__a )
self.assertIsNotNone(__a )
| 335 |
from collections import deque
def tarjan(g ):
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
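# Tarjan's algorithm visits each vertex and edge once in a single DFS, so it
# runs in O(V + E) time and emits the strongly connected components in reverse
# topological order of the condensation graph.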
def create_graph(n , edges ):
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
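    # Why these components: 0 -> 1 -> 2 -> 0 together with 0 -> 3 -> 1 makes
    # {0, 1, 2, 3} one cycle-connected component, while 4, 5 and 6 have no
    # path back and each form a singleton.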
| 335 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """trocr"""
__SCREAMING_SNAKE_CASE = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
    def __init__(self , vocab_size=50265 , d_model=1024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
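        # This config only describes the text decoder; TrOCR pairs it with a
        # separately configured image encoder inside VisionEncoderDecoderModel.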
| 335 |
from ...configuration_utils import PretrainedConfig
_UpperCamelCase = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """tapas"""
    def __init__(self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
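        # The int() conversion above undoes JSON serialisation, which turns the
        # integer keys of aggregation_labels into strings.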
| 335 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
__SCREAMING_SNAKE_CASE = """AutoImageProcessor"""
__SCREAMING_SNAKE_CASE = """AutoTokenizer"""
    def __init__(self , image_processor , tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__(self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self ):
        """simple docstring"""
        return ["input_ids", "attention_mask", "pixel_values"]
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
__author__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_two = operand_stack.peek()
            operand_stack.pop()
            num_one = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_one , num_two )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '''(5 + ((4 * 2) * (2 + 3)))'''
    # answer = 45
    print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 335 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find('patch' )
    patch_size = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
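# Note (an inference from the config values): the large 16-frame checkpoint
# uses 336-pixel inputs, mirroring the CLIP ViT-L/14@336px recipe.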
def rename_key( name ):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
    if name == "positional_embedding":
        name = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if name.startswith('transformer.resblocks' ):
        name = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('attn.out_proj' , 'self_attn.out_proj' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'text_model.final_layer_norm' )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
    if name == "visual.positional_embedding":
        name = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
    if name.startswith('visual.transformer.resblocks' ):
        name = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
    if "visual.conv1" in name:
        name = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
    if "visual.ln_pre" in name:
        name = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
    if "visual.ln_post" in name:
        name = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
    if "visual.proj" in name:
        name = name.replace('visual.proj' , 'visual_projection.weight' )
    if "text_projection" in name:
        name = name.replace('text_projection' , 'text_projection.weight' )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
    if "prompts_visual_ln" in name:
        name = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('positional' , 'position' )
    if name.startswith('mit.resblocks' ):
        name = name.replace('mit.resblocks' , 'mit.encoder.layers' )
    # prompts generator
    if name.startswith('prompts_generator.norm' ):
        name = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split('.' )
            if key.startswith('visual' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[
:dim
]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
return orig_state_dict
def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]:
if num_frames == 8:
UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCAmelCase__ = 'eating_spaghetti.npy'
    elif num_frames == 32:
        UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy'
    else:
        raise ValueError(f"Frame count {num_frames} is not supported; choose 8, 16 or 32." )
UpperCAmelCase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , )
UpperCAmelCase__ = np.load(snake_case__ )
return list(snake_case__ )
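# End-to-end conversion driver: download the original checkpoint (GitHub release or
# Google Drive), remap its state dict into the HF layout, run a forward pass on the
# reference video and compare the logits against known values before optionally
# saving the model and pushing it to the hub.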
def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]:
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCAmelCase__ = model_to_url[model_name]
UpperCAmelCase__ = 8
if "16-frames" in model_name:
UpperCAmelCase__ = 16
elif "shot" in model_name:
UpperCAmelCase__ = 32
UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase__ = 'pytorch_model.bin'
gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model']
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase__ = prepare_video(snake_case__ )
UpperCAmelCase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ = model(**snake_case__ )
# Verify outputs
UpperCAmelCase__ = outputs.logits_per_video
UpperCAmelCase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , snake_case__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(snake_case__ , organization='nielsr' )
processor.push_to_hub(snake_case__ , organization='nielsr' )
slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 335 | 1 |
import math
def UpperCamelCase_( snake_case__: int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase_( snake_case__: float = 0.1 ) -> int:
UpperCAmelCase__ = 3
UpperCAmelCase__ = 3
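    # Spiral diagonals: each new layer of side length j + 2 adds four corner values;
    # the bottom-right corner (j + 2) ** 2 is a perfect square and never prime, so only
    # the other three corners (starting at j * j + j + 1, stepping by j + 1) are tested,
    # while 2 * j - 1 counts every diagonal value seen so far. This matches the setup
    # of Project Euler problem 58.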
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(snake_case__ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
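# Split a CompVis latent-diffusion checkpoint into its sub-modules by key prefix
# ('first_stage_model.' -> VQModel, 'model.diffusion_model.' -> UNetLDMModel) and
# repackage them, together with a DDIM scheduler, as a diffusers LDMPipeline.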
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: List[Any] , snake_case__: Union[str, Any] ) -> Tuple:
UpperCAmelCase__ = OmegaConf.load(snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
UpperCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'first_stage_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
UpperCAmelCase__ = config.model.params.first_stage_config.params
UpperCAmelCase__ = config.model.params.unet_config.params
UpperCAmelCase__ = VQModel(**snake_case__ ).eval()
vqvae.load_state_dict(snake_case__ )
UpperCAmelCase__ = UNetLDMModel(**snake_case__ ).eval()
unet.load_state_dict(snake_case__ )
UpperCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , )
UpperCAmelCase__ = LDMPipeline(snake_case__ , snake_case__ , snake_case__ )
pipeline.save_pretrained(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
_UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 335 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase_( snake_case__: int ) -> str:
for pegasus_name, hf_name in PATTERNS:
UpperCAmelCase__ = k.replace(snake_case__ , snake_case__ )
return k
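# Build a PegasusForConditionalGeneration from the TF weight dict: keys are renamed
# via the PATTERNS table, TF Dense kernels (stored as (in, out)) are transposed into
# torch's (out, in) Linear layout, biases absent from the checkpoint are
# zero-initialised, and the static sinusoidal position embeddings are the only keys
# allowed to remain missing.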
def UpperCamelCase_( snake_case__: dict , snake_case__: dict ) -> PegasusForConditionalGeneration:
UpperCAmelCase__ = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
UpperCAmelCase__ = PegasusConfig(**snake_case__ )
UpperCAmelCase__ = PegasusForConditionalGeneration(snake_case__ )
UpperCAmelCase__ = torch_model.model.state_dict()
UpperCAmelCase__ = {}
for k, v in tf_weights.items():
UpperCAmelCase__ = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
UpperCAmelCase__ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
UpperCAmelCase__ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def UpperCamelCase_( snake_case__: int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
UpperCAmelCase__ = tf.train.list_variables(snake_case__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
UpperCAmelCase__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase__ = tf.train.load_variable(snake_case__ , snake_case__ )
UpperCAmelCase__ = array
return tf_weights
def UpperCamelCase_( snake_case__: str , snake_case__: str ) -> Optional[Any]:
# save tokenizer first
UpperCAmelCase__ = Path(snake_case__ ).parent.name
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
UpperCAmelCase__ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
UpperCAmelCase__ = get_tf_weights_as_numpy(snake_case__ )
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
UpperCAmelCase__ = task_specific_params
UpperCAmelCase__ = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
UpperCAmelCase__ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335 |
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
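# The fast tests below assemble a miniature UNet/VAE/CLIP stack so the x4 upscale
# pipeline can run end-to-end on CPU; the slow tests exercise the real
# stabilityai/stable-diffusion-x4-upscaler checkpoint on GPU.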
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = 1
UpperCAmelCase__ = 3
UpperCAmelCase__ = (32, 32)
UpperCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ = self.dummy_cond_unet_upscale
UpperCAmelCase__ = DDPMScheduler()
UpperCAmelCase__ = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase__ = self.dummy_vae
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
UpperCAmelCase__ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase__ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase__ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase__ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=__a , )[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase__ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ = self.dummy_cond_unet_upscale
UpperCAmelCase__ = DDPMScheduler()
UpperCAmelCase__ = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase__ = self.dummy_vae
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
UpperCAmelCase__ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase__ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase__ = output.images
assert image.shape[0] == 2
UpperCAmelCase__ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase__ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.dummy_cond_unet_upscale
UpperCAmelCase__ = DDPMScheduler()
UpperCAmelCase__ = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase__ = self.dummy_vae
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase__ = unet.half()
UpperCAmelCase__ = text_encoder.half()
        # assemble the upscale pipeline from the dummy components
UpperCAmelCase__ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase__ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type='np' , ).images
UpperCAmelCase__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
UpperCAmelCase__ = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCAmelCase__ = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase__ = 'a cat sitting on a park bench'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=__a , image=__a , generator=__a , output_type='np' , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
UpperCAmelCase__ = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCAmelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase__ = 'a cat sitting on a park bench'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=__a , image=__a , generator=__a , output_type='np' , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase__ = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCAmelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ = 'a cat sitting on a park bench'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type='np' , )
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 335 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """sew-d"""
def __init__(self , __a=32 , __a=768 , __a=12 , __a=12 , __a=3072 , __a=2 , __a=512 , __a=256 , __a=True , __a=True , __a=("p2c", "c2p") , __a="layer_norm" , __a="gelu_python" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.02 , __a=1E-7 , __a=1E-5 , __a="group" , __a="gelu" , __a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a=False , __a=128 , __a=16 , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="mean" , __a=False , __a=False , __a=256 , __a=0 , __a=1 , __a=2 , **__a , ) -> str:
"""simple docstring"""
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = feat_extract_norm
UpperCAmelCase__ = feat_extract_activation
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = conv_bias
UpperCAmelCase__ = num_conv_pos_embeddings
UpperCAmelCase__ = num_conv_pos_embedding_groups
UpperCAmelCase__ = len(self.conv_dim )
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = squeeze_factor
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = position_buckets
UpperCAmelCase__ = share_att_key
UpperCAmelCase__ = relative_attention
UpperCAmelCase__ = norm_rel_ebd
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = feat_proj_dropout
UpperCAmelCase__ = final_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = feature_layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = apply_spec_augment
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ = ctc_loss_reduction
UpperCAmelCase__ = ctc_zero_infinity
# sequence classification
UpperCAmelCase__ = use_weighted_layer_sum
UpperCAmelCase__ = classifier_proj_size
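    # The property below is the overall time-axis downsampling of the convolutional
    # feature extractor, i.e. the product of all conv strides.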
@property
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 335 | 1 |
import random
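# Probabilistic primality testing: the first function below is a Rabin-Miller witness
# test (5 random rounds), the second trial-divides by the primes below 1000 before
# falling back to it, and the last samples random keysize-bit candidates until one
# passes (called as generate_large_prime at the bottom).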
def UpperCamelCase_( snake_case__: int ) -> bool:
UpperCAmelCase__ = num - 1
UpperCAmelCase__ = 0
while s % 2 == 0:
UpperCAmelCase__ = s // 2
t += 1
for _ in range(5 ):
UpperCAmelCase__ = random.randrange(2 , num - 1 )
UpperCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ )
if v != 1:
UpperCAmelCase__ = 0
while v != (num - 1):
if i == t - 1:
return False
else:
UpperCAmelCase__ = i + 1
UpperCAmelCase__ = (v**2) % num
return True
def UpperCamelCase_( snake_case__: int ) -> bool:
if num < 2:
return False
UpperCAmelCase__ = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def UpperCamelCase_( snake_case__: int = 10_24 ) -> int:
while True:
UpperCAmelCase__ = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(snake_case__ ):
return num
if __name__ == "__main__":
_UpperCamelCase = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 335 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase_( snake_case__: int ) -> str:
for pegasus_name, hf_name in PATTERNS:
UpperCAmelCase__ = k.replace(snake_case__ , snake_case__ )
return k
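# Build a PegasusForConditionalGeneration from the TF weight dict: keys are renamed
# via the PATTERNS table, TF Dense kernels (stored as (in, out)) are transposed into
# torch's (out, in) Linear layout, biases absent from the checkpoint are
# zero-initialised, and the static sinusoidal position embeddings are the only keys
# allowed to remain missing.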
def UpperCamelCase_( snake_case__: dict , snake_case__: dict ) -> PegasusForConditionalGeneration:
UpperCAmelCase__ = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
UpperCAmelCase__ = PegasusConfig(**snake_case__ )
UpperCAmelCase__ = PegasusForConditionalGeneration(snake_case__ )
UpperCAmelCase__ = torch_model.model.state_dict()
UpperCAmelCase__ = {}
for k, v in tf_weights.items():
UpperCAmelCase__ = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
UpperCAmelCase__ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
UpperCAmelCase__ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def UpperCamelCase_( snake_case__: int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
UpperCAmelCase__ = tf.train.list_variables(snake_case__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
UpperCAmelCase__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase__ = tf.train.load_variable(snake_case__ , snake_case__ )
UpperCAmelCase__ = array
return tf_weights
def UpperCamelCase_( snake_case__: str , snake_case__: str ) -> Optional[Any]:
# save tokenizer first
UpperCAmelCase__ = Path(snake_case__ ).parent.name
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
UpperCAmelCase__ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
UpperCAmelCase__ = get_tf_weights_as_numpy(snake_case__ )
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
UpperCAmelCase__ = task_specific_params
UpperCAmelCase__ = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
UpperCAmelCase__ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335 | 1 |
import re
def UpperCamelCase_( snake_case__: str ) -> bool:
UpperCAmelCase__ = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
if match := re.search(snake_case__ , snake_case__ ):
return match.string == phone
return False
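# A few illustrative inputs (not exhaustive): the pattern accepts ten-digit numbers
# starting with 7, 8 or 9, optionally prefixed with '+91', '0' or '91', so e.g.
# indian_phone_validator('+918827897895') and indian_phone_validator('9876543210')
# return True, while a nine-digit string such as '987654321' returns False.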
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 335 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 13
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = 99
UpperCAmelCase__ = 384
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 37
UpperCAmelCase__ = 'gelu'
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 512
UpperCAmelCase__ = 16
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = 128
UpperCAmelCase__ = 2
UpperCAmelCase__ = 9
UpperCAmelCase__ = 1
UpperCAmelCase__ = None
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel(config=__a )
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = True
if hasattr(__a , 'use_cache' ):
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = self._prepare_for_class(__a , __a )
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' )
UpperCAmelCase__ = tf.keras.models.load_model(__a )
UpperCAmelCase__ = model(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = outputs['encoder_hidden_states']
UpperCAmelCase__ = outputs['encoder_attentions']
else:
UpperCAmelCase__ = outputs['hidden_states']
UpperCAmelCase__ = outputs['attentions']
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
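        # ConvBERT replaces half of the self-attention heads with a span-based
        # convolution branch (head_ratio defaults to 2), so the attention tensors
        # checked below expose only num_attention_heads / 2 heads.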
def check_decoder_attentions_output(__a ):
UpperCAmelCase__ = len(__a )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a ):
UpperCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = [1, 6, 768]
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 335 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """informer"""
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__(self , __a = None , __a = None , __a = "student_t" , __a = "nll" , __a = 1 , __a = None , __a = "mean" , __a = 0 , __a = 0 , __a = 0 , __a = 0 , __a = None , __a = None , __a = 64 , __a = 32 , __a = 32 , __a = 2 , __a = 2 , __a = 2 , __a = 2 , __a = True , __a = "gelu" , __a = 0.05 , __a = 0.1 , __a = 0.1 , __a = 0.1 , __a = 0.1 , __a = 100 , __a = 0.02 , __a=True , __a = "prob" , __a = 5 , __a = True , **__a , ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = prediction_length
UpperCAmelCase__ = context_length or prediction_length
UpperCAmelCase__ = distribution_output
UpperCAmelCase__ = loss
UpperCAmelCase__ = input_size
UpperCAmelCase__ = num_time_features
UpperCAmelCase__ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase__ = scaling
UpperCAmelCase__ = num_dynamic_real_features
UpperCAmelCase__ = num_static_real_features
UpperCAmelCase__ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase__ = cardinality
else:
UpperCAmelCase__ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase__ = embedding_dimension
else:
UpperCAmelCase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase__ = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase__ = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase__ = d_model
UpperCAmelCase__ = encoder_attention_heads
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = encoder_layerdrop
UpperCAmelCase__ = decoder_layerdrop
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = init_std
UpperCAmelCase__ = use_cache
# Informer
UpperCAmelCase__ = attention_type
UpperCAmelCase__ = sampling_factor
UpperCAmelCase__ = distil
super().__init__(is_encoder_decoder=__a , **__a )
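    # Number of extra feature channels concatenated to the lagged target values:
    # categorical embeddings, dynamic/static real features, time features, and the
    # two scaling statistics (log1p(abs(loc)) and log(scale)) per input dimension.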
@property
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 335 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , **__a ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def UpperCamelCase__ (self , **__a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase__ = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
UpperCAmelCase__ = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
UpperCAmelCase__ = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase__ = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase__ = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase__ = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
UpperCAmelCase__ = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
UpperCAmelCase__ = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> List[str]:
"""simple docstring"""
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
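    # Preprocessing computes the image embeddings once, then yields the sampled grid
    # points in chunks of `points_per_batch` (with an `is_last` flag) so the
    # ChunkPipeline can push thousands of point prompts through the mask decoder
    # without exhausting memory.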
def UpperCamelCase__ (self , __a , __a=64 , __a = 0 , __a = 512 / 1500 , __a = 32 , __a = 1 , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = load_image(__a )
UpperCAmelCase__ = self.image_processor.size['longest_edge']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCAmelCase__ = self.image_processor(images=__a , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase__ = self.get_inference_context()
with inference_context():
UpperCAmelCase__ = self._ensure_tensor_on_device(__a , device=self.device )
UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
UpperCAmelCase__ = image_embeddings
UpperCAmelCase__ = grid_points.shape[1]
UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , __a , __a ):
UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase__ = input_labels[:, i : i + points_per_batch]
UpperCAmelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase__ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = model_inputs.pop('input_boxes' )
UpperCAmelCase__ = model_inputs.pop('is_last' )
UpperCAmelCase__ = model_inputs.pop('original_sizes' ).tolist()
UpperCAmelCase__ = model_inputs.pop('reshaped_input_sizes' ).tolist()
UpperCAmelCase__ = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCAmelCase__ = model_outputs['pred_masks']
UpperCAmelCase__ = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCAmelCase__ = model_outputs['iou_scores']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase__ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation(
__a , __a , __a , __a )
UpperCAmelCase__ = defaultdict(__a )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__a )
UpperCAmelCase__ = {}
if output_rle_mask:
UpperCAmelCase__ = rle_mask
if output_bboxes_mask:
UpperCAmelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 335 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    '''simple docstring'''
    data: int
    next_node: Node | None
class SortedLinkedList:
    '''simple docstring'''
    def __init__(self , ints: Iterable[int] ) -> None:
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__(self ) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__(self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__(self ) -> str:
"""simple docstring"""
return " -> ".join([str(__a ) for node in self] )
def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 335 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__SCREAMING_SNAKE_CASE = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__SCREAMING_SNAKE_CASE = field(
default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __SCREAMING_SNAKE_CASE = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
__SCREAMING_SNAKE_CASE = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__SCREAMING_SNAKE_CASE = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__SCREAMING_SNAKE_CASE = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__SCREAMING_SNAKE_CASE = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Sequence lengths used for training."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Training seed."""} )
__SCREAMING_SNAKE_CASE = field(
default=1024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Sample from the language model's output distribution."""} )
__SCREAMING_SNAKE_CASE = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__SCREAMING_SNAKE_CASE = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__SCREAMING_SNAKE_CASE = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__SCREAMING_SNAKE_CASE = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
    __SCREAMING_SNAKE_CASE = field(
        default="""eval_results.json""" , metadata={"""help""": """Name of the file to save evaluation results to."""} )
__SCREAMING_SNAKE_CASE = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(
default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
    __SCREAMING_SNAKE_CASE = field(
        default=32768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__SCREAMING_SNAKE_CASE = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
    __SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for pretokenization."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
    __SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push the saved model to the hub."""} )
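# A hedged sketch (not part of the original file) of how dataclasses like the ones
# above are typically consumed, assuming transformers' HfArgumentParser; the tiny
# `SketchArguments` class below stands in for the anonymized argument classes above.
from transformers import HfArgumentParser
@dataclass
class SketchArguments:
    model_ckpt: str = field(default='codeparrot/codeparrot' , metadata={'help': 'Model name or path.'} )
    train_batch_size: int = field(default=2 , metadata={'help': 'Batch size for training.'} )
if __name__ == "__main__":
    parser = HfArgumentParser(SketchArguments )
    sketch_args = parser.parse_args_into_dataclasses(args=['--train_batch_size', '8'] )[0]
    print(sketch_args.model_ckpt , sketch_args.train_batch_size )  # codeparrot/codeparrot 8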
| 335 | 1 |
def hamming_distance(string_a: str , string_b: str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
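    # hedged usage demo (not part of the original file): the strings differ in
    # exactly three positions.
    print(hamming_distance('karolin' , 'kathrin' ))  # 3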
| 335 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_choices
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained('roberta-base' , from_pt=__a )
UpperCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
| 335 | 1 |
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector(end_point_a: Pointad , end_point_b: Pointad ) -> Vectorad:
    x = end_point_b[0] - end_point_a[0]
    y = end_point_b[1] - end_point_a[1]
    z = end_point_b[2] - end_point_a[2]
    return (x, y, z)
def get_ad_vectors_cross(ab: Vectorad , ac: Vectorad ) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vectorad , accuracy: int ) -> bool:
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear(point_a: Pointad , point_b: Pointad , point_c: Pointad , accuracy: int = 10 ) -> bool:
    vector_ab = create_vector(point_a , point_b )
    vector_ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(vector_ab , vector_ac ) , accuracy )
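# Hedged usage demo, not part of the original file: three points on the x-axis are
# collinear (AB x AC is the zero vector), while moving the third point off the axis
# breaks collinearity.
if __name__ == "__main__":
    print(are_collinear((0, 0, 0) , (1, 0, 0) , (2, 0, 0) ))  # True
    print(are_collinear((0, 0, 0) , (1, 0, 0) , (1, 1, 0) ))  # False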
| 335 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
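# Hedged migration sketch (not part of the original file): new code should build the
# image processor directly instead of the deprecated feature extractor, e.g.
#   from transformers import MobileViTImageProcessor
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")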
| 335 | 1 |
class lowercase :
'''simple docstring'''
def __init__(self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = {}
def UpperCamelCase__ (self ) -> None:
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(__a , ' -> ' , ' -> '.join([str(__a ) for j in self.vertex[i]] ) )
def UpperCamelCase__ (self , __a , __a ) -> None:
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__a )
else:
# else make a new vertex
UpperCAmelCase__ = [to_vertex]
def UpperCamelCase__ (self ) -> None:
"""simple docstring"""
UpperCAmelCase__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__a , __a )
def UpperCamelCase__ (self , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = True
print(__a , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__a , __a )
if __name__ == "__main__":
_UpperCamelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
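# A minimal sketch (an assumption-laden simplification, not the actual
# transformers._LazyModule) of the deferred-import pattern used above: submodules are
# only imported the first time one of their attributes is accessed, so importing the
# package itself stays cheap.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self , name , import_structure ):
        super().__init__(name )
        # map each exported attribute back to the module that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self , attr ):
        module = importlib.import_module(self._attr_to_module[attr] )
        return getattr(module , attr )
if __name__ == "__main__":
    lazy = LazyModuleSketch('demo' , {'math': ['sqrt']} )
    print(lazy.sqrt(9.0 ))  # 3.0 -- "math" is only imported on first attribute access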
| 335 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'ylacombe/bark-small'
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = 'en_speaker_1'
UpperCAmelCase__ = 'This is a test string'
UpperCAmelCase__ = 'speaker_embeddings_path.json'
UpperCAmelCase__ = 'speaker_embeddings'
def UpperCamelCase__ (self , **__a ) -> List[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BarkProcessor(tokenizer=__a )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCAmelCase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase__ = 35
UpperCAmelCase__ = 2
UpperCAmelCase__ = 8
UpperCAmelCase__ = {
'semantic_prompt': np.ones(__a ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase__ = processor(text=self.input_string , voice_preset=__a )
UpperCAmelCase__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase__ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(__a , **__a )
UpperCAmelCase__ = processor(text=self.input_string , voice_preset=__a )
UpperCAmelCase__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BarkProcessor(tokenizer=__a )
UpperCAmelCase__ = processor(text=self.input_string )
UpperCAmelCase__ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 335 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCAmelCase__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sgugger/tiny-distilbert-classification'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
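# Hedged usage sketch (not part of the original file): the core pattern the tests
# above exercise, reduced to a single benchmark run.
#   args = TensorFlowBenchmarkArguments(models=['sshleifer/tiny-gpt2'], inference=True,
#                                       sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#   results = TensorFlowBenchmark(args).run()
#   print(results.time_inference_result)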
| 335 | 1 |
def depth_first_search(grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
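    # hedged usage demo (not part of the original file): with the centre cell blocked,
    # the open cells form a ring, so exactly two simple paths connect the top-left
    # corner to the bottom-right corner.
    print(depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]] , 0 , 0 , set() ))  # 2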
| 335 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
if isinstance(__a , __a ):
UpperCAmelCase__ = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__(self , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
if len(__a ) == 0 or len(__a ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(__a ) )
if isinstance(__a , __a ):
UpperCAmelCase__ = [sequences]
UpperCAmelCase__ = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__a )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_UpperCamelCase )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , __a=ZeroShotClassificationArgumentHandler() , *__a , **__a ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = args_parser
super().__init__(*__a , **__a )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def UpperCamelCase__ (self , __a , __a=True , __a=True , __a=TruncationStrategy.ONLY_FIRST , **__a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
                'Tokenizer does not support padding, which is necessary for zero-shot classification. Attempting to use '
                '`pad_token=eos_token`' )
UpperCAmelCase__ = self.tokenizer.eos_token
try:
UpperCAmelCase__ = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=__a , )
except Exception as e:
if "too short" in str(__a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase__ = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCamelCase__ (self , **__a ) -> List[str]:
"""simple docstring"""
if kwargs.get('multi_class' , __a ) is not None:
UpperCAmelCase__ = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
UpperCAmelCase__ = {}
if "candidate_labels" in kwargs:
UpperCAmelCase__ = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
UpperCAmelCase__ = kwargs['hypothesis_template']
UpperCAmelCase__ = {}
if "multi_label" in kwargs:
UpperCAmelCase__ = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__(self , __a , *__a , **__a , ) -> str:
"""simple docstring"""
if len(__a ) == 0:
pass
elif len(__a ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase__ = args[0]
else:
raise ValueError(F"Unable to understand extra arguments {args}" )
return super().__call__(__a , **__a )
def UpperCamelCase__ (self , __a , __a=None , __a="This example is {}." ) -> Any:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self._args_parser(__a , __a , __a )
for i, (candidate_label, sequence_pair) in enumerate(zip(__a , __a ) ):
UpperCAmelCase__ = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__a ) - 1,
**model_input,
}
def UpperCamelCase__ (self , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = inputs['candidate_label']
UpperCAmelCase__ = inputs['sequence']
UpperCAmelCase__ = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase__ = self.model(**__a )
UpperCAmelCase__ = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def UpperCamelCase__ (self , __a , __a=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = [outputs['candidate_label'] for outputs in model_outputs]
UpperCAmelCase__ = [outputs['sequence'] for outputs in model_outputs]
UpperCAmelCase__ = np.concatenate([output['logits'].numpy() for output in model_outputs] )
UpperCAmelCase__ = logits.shape[0]
UpperCAmelCase__ = len(__a )
UpperCAmelCase__ = N // n
UpperCAmelCase__ = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__a ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase__ = self.entailment_id
UpperCAmelCase__ = -1 if entailment_id == 0 else 0
UpperCAmelCase__ = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase__ = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
UpperCAmelCase__ = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase__ = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase__ = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
UpperCAmelCase__ = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
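# A small self-contained sketch (not part of the original file) of the score
# computation in `postprocess` above; the logits below are made up for illustration
# and the columns are assumed to be [contradiction, neutral, entailment].
example_logits = np.array([[2.0, 0.1, -1.0], [0.5, 0.2, 1.5]] )  # (n_labels, 3)
entail_logits = example_logits[:, 2]
contra_logits = example_logits[:, 0]
# multi-label: an independent contradiction-vs-entailment softmax per label
pair = np.stack([contra_logits, entail_logits] , axis=-1 )
multi_label_scores = np.exp(pair ) / np.exp(pair ).sum(-1 , keepdims=True )
print(multi_label_scores[:, 1] )  # independent P(entailment) per label
# single-label: softmax of the entailment logits across all candidate labels
single_label_scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum()
print(single_label_scores )  # sums to 1 across labels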
| 335 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__(self , *,
__a = 4 , __a = 768 , __a , __a , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.Linear(__a , __a )
# parameters for encoder hidden states
UpperCAmelCase__ = clip_extra_context_tokens
UpperCAmelCase__ = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.LayerNorm(__a )
def UpperCamelCase__ (self , *, __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCAmelCase__ = image_embeddings.shape[0]
UpperCAmelCase__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCAmelCase__ = classifier_free_guidance_embeddings.expand(
__a , -1 )
UpperCAmelCase__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCAmelCase__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCAmelCase__ = self.embedding_proj(__a )
UpperCAmelCase__ = self.clip_image_embeddings_project_to_time_embeddings(__a )
UpperCAmelCase__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCAmelCase__ = self.clip_extra_context_tokens_proj(__a )
UpperCAmelCase__ = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
UpperCAmelCase__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCAmelCase__ = self.encoder_hidden_states_proj(__a )
UpperCAmelCase__ = self.text_encoder_hidden_states_norm(__a )
UpperCAmelCase__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
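# Hedged shape walkthrough (not part of the original file), assuming batch size B,
# CLIP embedding dim D, time embedding dim T, cross-attention dim C and
# K = clip_extra_context_tokens:
#   image_embeddings                     : (B, D)
#   time-embedding projections           : (B, T), summed into the timestep embedding
#   clip_extra_context_tokens            : (B, C * K) -> reshape (B, C, K) -> permute (B, K, C)
#   projected text encoder hidden states : (B, L, C)
#   returned hidden states               : (B, K + L, C) after concatenation on dim 1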
| 335 | 1 |
from math import pi
def UpperCamelCase_( snake_case__: int , snake_case__: int ) -> float:
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
| 335 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BioGptTokenizer
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
UpperCAmelCase__ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(__a ) )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = 'lower newer'
return input_text, output_text
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ = 'lower'
UpperCAmelCase__ = ['low', 'er</w>']
UpperCAmelCase__ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase__ = tokens + ['<unk>']
UpperCAmelCase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
UpperCAmelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 335 | 1 |
import heapq
def UpperCamelCase_( snake_case__: dict ) -> set[int]:
UpperCAmelCase__ = []
    # for each node and its adjacency list, add the node and its rank to the queue.
    # using the heapq module the queue is filled like a priority queue.
    # heapq implements a min-priority queue, so -1 * len(v) is pushed to emulate a max-priority queue
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case__ , [-1 * len(snake_case__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase__ = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase__ = heapq.heappop(snake_case__ )[1][0]
chosen_vertices.add(snake_case__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase__ = elem[1][1].index(snake_case__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 335 |
class lowercase : # Public class to implement a graph
'''simple docstring'''
def __init__(self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = row
UpperCAmelCase__ = col
UpperCAmelCase__ = graph
def UpperCamelCase__ (self , __a , __a , __a ) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCamelCase__ (self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCAmelCase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCAmelCase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __a )
def UpperCamelCase__ (self ) -> int: # And finally, count all islands.
"""simple docstring"""
UpperCAmelCase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCAmelCase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__a , __a , __a )
count += 1
return count
| 335 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))
def compare_versions(library_or_version: Union[str, Version] , operation: str , requirement_version: str ) -> bool:
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation_func(library_or_version , parse(requirement_version ) )
def is_torch_version(operation: str , version: str ) -> bool:
    return compare_versions(torch_version , operation , version )
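# Hedged usage note (not part of the original file): with the package installed,
#   is_torch_version(">=", "1.12.0") checks the running torch installation, and
#   compare_versions("numpy", ">=", "1.20.0") checks any other installed library.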
| 335 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase = Lock()
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Dict , snake_case__: Any ) -> str:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):  # the demo list built in main() has 10 elements
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
UpperCAmelCase__ = []
UpperCAmelCase__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
UpperCAmelCase__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCamelCase_( ) -> Dict:
UpperCAmelCase__ = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
UpperCAmelCase__ = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main()
| 335 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
_UpperCamelCase = {
'''Salesforce/codegen-350M-mono''': 2048,
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE = CodeGenTokenizer
def __init__(self , __a=None , __a=None , __a=None , __a="<|endoftext|>" , __a="<|endoftext|>" , __a="<|endoftext|>" , __a=False , **__a , ) -> int:
"""simple docstring"""
super().__init__(
__a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , add_prefix_space=__a , **__a , )
if kwargs.pop('add_bos_token' , __a ):
            model_id = kwargs.pop('name_or_path' , '' )
            raise ValueError(
                'Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '
                'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
UpperCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __a ) != add_prefix_space:
UpperCAmelCase__ = getattr(__a , pre_tok_state.pop('type' ) )
UpperCAmelCase__ = add_prefix_space
UpperCAmelCase__ = pre_tok_class(**__a )
UpperCAmelCase__ = add_prefix_space
def UpperCamelCase__ (self , *__a , **__a ) -> BatchEncoding:
"""simple docstring"""
UpperCAmelCase__ = kwargs.get('is_split_into_words' , __a )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__a , **__a )
def UpperCamelCase__ (self , *__a , **__a ) -> BatchEncoding:
"""simple docstring"""
UpperCAmelCase__ = kwargs.get('is_split_into_words' , __a )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__a , **__a )
def UpperCamelCase__ (self , __a , __a = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase__ = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
def UpperCamelCase__ (self , __a , __a = False , __a = None , __a = None , **__a , ) -> str:
"""simple docstring"""
UpperCAmelCase__ = super().decode(
token_ids=__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , **__a , )
if truncate_before_pattern is not None and len(__a ) > 0:
UpperCAmelCase__ = self.truncate(__a , __a )
return decoded_text
def UpperCamelCase__ (self , __a , __a ) -> List[str]:
"""simple docstring"""
def find_re(__a , __a , __a ):
UpperCAmelCase__ = pattern.search(__a , __a )
return m.start() if m else -1
UpperCAmelCase__ = [re.compile(__a , re.MULTILINE ) for pattern in truncate_before_pattern]
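        # heuristic truncation of runaway generations: keep at most one top-level
        # `print` block and one top-level `def`, then cut at the earliest match of
        # any user-supplied pattern in `terminals`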
UpperCAmelCase__ = list(re.finditer('^print' , __a , re.MULTILINE ) )
if len(__a ) > 1:
UpperCAmelCase__ = completion[: prints[1].start()]
UpperCAmelCase__ = list(re.finditer('^def' , __a , re.MULTILINE ) )
if len(__a ) > 1:
UpperCAmelCase__ = completion[: defs[1].start()]
UpperCAmelCase__ = 0
UpperCAmelCase__ = [
pos for pos in [find_re(__a , __a , __a ) for terminal in terminals] if pos != -1
]
if len(__a ) > 0:
return completion[: min(__a )]
else:
return completion
| 335 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
'''simple docstring'''
def __init__(self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
UpperCAmelCase__ = 256
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
def UpperCamelCase__ (self , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = cva.imread(__a , 0 )
UpperCAmelCase__ = copy.deepcopy(self.img )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCAmelCase__ = np.sum(__a )
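        # histogram equalisation: accumulate the normalised histogram into a running
        # CDF (self.sk) and map each grey level k to round((self.L - 1) * CDF(k))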
for i in range(len(__a ) ):
UpperCAmelCase__ = x[i] / self.k
self.sk += prk
UpperCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase__ = int(last % last )
UpperCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__a )
UpperCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase__ = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _UpperCamelCase = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 335 | 1 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_UpperCamelCase = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: Any , snake_case__: str , snake_case__: int=False , snake_case__: List[str]=True ) -> Optional[int]:
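    # convert a single PyTorch checkpoint to TF2: build the TF model from its config,
    # load the PyTorch weights into it, optionally compare the two models' outputs,
    # and save the result as an .h5 weights file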
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
UpperCAmelCase__ = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models )
UpperCAmelCase__ = config_class.from_json_file(snake_case__ )
UpperCAmelCase__ = True
UpperCAmelCase__ = True
print(f"Building TensorFlow model from configuration: {config}" )
UpperCAmelCase__ = model_class(snake_case__ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
UpperCAmelCase__ = cached_file(
snake_case__ , snake_case__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
UpperCAmelCase__ = load_pytorch_checkpoint_in_tfa_model(snake_case__ , snake_case__ )
if compare_with_pt_model:
UpperCAmelCase__ = tf_model(tf_model.dummy_inputs , training=snake_case__ ) # build the network
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )
UpperCAmelCase__ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
with torch.no_grad():
UpperCAmelCase__ = pt_model(**pt_model.dummy_inputs )
UpperCAmelCase__ = pto[0].numpy()
UpperCAmelCase__ = tfo[0].numpy()
UpperCAmelCase__ = np.amax(np.abs(np_pt - np_tf ) )
print(f"Max absolute difference between models outputs {diff}" )
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
# Save pytorch-model
print(f"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(snake_case__ , save_format='h5' )
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: Dict , snake_case__: int=None , snake_case__: Union[str, Any]=None , snake_case__: Optional[Any]=False , snake_case__: Any=False , snake_case__: List[str]=False , snake_case__: str=False , ) -> Union[str, Any]:
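    # batch driver: iterate over every requested (model type, checkpoint) pair,
    # resolving shortcut names through the AWS config/model maps, and convert each one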
if args_model_type is None:
UpperCAmelCase__ = list(MODEL_CLASSES.keys() )
else:
UpperCAmelCase__ = [args_model_type]
for j, model_type in enumerate(snake_case__ , start=1 ):
print('=' * 1_00 )
print(f" Converting model type {j}/{len(snake_case__ )}: {model_type}" )
print('=' * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
UpperCAmelCase__ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
UpperCAmelCase__ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(snake_case__ , snake_case__ ) , start=1 ):
print('-' * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
UpperCAmelCase__ = model_shortcut_name
elif only_convert_finetuned_models:
print(f" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
f" Converting checkpoint {i}/{len(snake_case__ )}: {model_shortcut_name} - model_type {model_type}" )
print('-' * 1_00 )
if config_shortcut_name in aws_config_map:
UpperCAmelCase__ = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models )
else:
UpperCAmelCase__ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
UpperCAmelCase__ = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models )
else:
UpperCAmelCase__ = model_shortcut_name
if os.path.isfile(snake_case__ ):
UpperCAmelCase__ = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=snake_case__ , pytorch_checkpoint_path=snake_case__ , config_file=snake_case__ , tf_dump_path=os.path.join(snake_case__ , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=snake_case__ , )
if remove_cached_files:
os.remove(snake_case__ )
os.remove(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
        '''--pytorch_checkpoint_path is not given or is a shortcut name, '''
        '''use the configuration associated with the shortcut name on AWS.'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
_UpperCamelCase = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 335 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> str:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
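        # each of the len(depths) - 1 patch-merging stages merges 2x2 neighbouring
        # tokens (4x fewer tokens) and doubles the channel width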
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = SwinvaForMaskedImageModeling(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = SwinvaForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , embed_dim=37 )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
UpperCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = config.window_size**2
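        # Swinv2 attention is window-local, so every attention map is
        # window_size**2 x window_size**2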
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase__ = len(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
UpperCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape
UpperCAmelCase__ = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
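        # bump the input size past the next patch boundary so the padding path
        # inside the model is exercised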
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = SwinvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = _config_zero_init(__a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__a )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__a )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase__ = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 335 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , __a , __a , __a , __a , __a , __a , __a , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def UpperCamelCase__ (self , __a = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
self.enable_attention_slicing(__a )
@torch.no_grad()
def __call__(self , __a , __a = 512 , __a = 512 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , __a = None , **__a , ) -> int:
"""simple docstring"""
if isinstance(__a , __a ):
UpperCAmelCase__ = 1
elif isinstance(__a , __a ):
UpperCAmelCase__ = len(__a )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__a )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__a )}." )
# get prompt text embeddings
UpperCAmelCase__ = self.tokenizer(
__a , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCAmelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = text_embeddings.shape
UpperCAmelCase__ = text_embeddings.repeat(1 , __a , 1 )
UpperCAmelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , __a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase__ = 42
if negative_prompt is None:
UpperCAmelCase__ = ['']
elif type(__a ) is not type(__a ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !="
F" {type(__a )}." )
elif isinstance(__a , __a ):
UpperCAmelCase__ = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
UpperCAmelCase__ = negative_prompt
UpperCAmelCase__ = text_input_ids.shape[-1]
UpperCAmelCase__ = self.tokenizer(
__a , padding='max_length' , max_length=__a , truncation=__a , return_tensors='pt' , )
UpperCAmelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ = uncond_embeddings.shape[1]
UpperCAmelCase__ = uncond_embeddings.repeat(__a , __a , 1 )
UpperCAmelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , __a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase__ = torch.randn(
__a , generator=__a , device='cpu' , dtype=__a ).to(self.device )
UpperCAmelCase__ = torch.randn(__a , generator=__a , device='cpu' , dtype=__a ).to(
self.device )
else:
UpperCAmelCase__ = torch.randn(
__a , generator=__a , device=self.device , dtype=__a )
UpperCAmelCase__ = torch.randn(__a , generator=__a , device=self.device , dtype=__a )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCAmelCase__ = latents_reference.to(self.device )
UpperCAmelCase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase__ = 0 if dx < 0 else dx
UpperCAmelCase__ = 0 if dy < 0 else dy
UpperCAmelCase__ = max(-dx , 0 )
UpperCAmelCase__ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCAmelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase__ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ = {}
if accepts_eta:
UpperCAmelCase__ = eta
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCAmelCase__ = self.unet(__a , __a , encoder_hidden_states=__a ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a , __a , __a )
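        # 0.18215 is the Stable Diffusion VAE latent scaling factor; undo it before decoding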
UpperCAmelCase__ = 1 / 0.1_82_15 * latents
UpperCAmelCase__ = self.vae.decode(__a ).sample
UpperCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase__ = self.feature_extractor(self.numpy_to_pil(__a ) , return_tensors='pt' ).to(
self.device )
UpperCAmelCase__ , UpperCAmelCase__ = self.safety_checker(
images=__a , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase__ = None
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(__a )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
| 335 |
from collections import deque
def UpperCamelCase_( snake_case__: Tuple ) -> Tuple:
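    # Tarjan's algorithm: a single DFS assigns every vertex a discovery index and a
    # low-link value; a vertex whose low-link equals its own index is the root of a
    # strongly connected component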
UpperCAmelCase__ = len(snake_case__ )
UpperCAmelCase__ = deque()
UpperCAmelCase__ = [False for _ in range(snake_case__ )]
UpperCAmelCase__ = [-1 for _ in range(snake_case__ )]
UpperCAmelCase__ = index_of[:]
def strong_connect(snake_case__: List[str] , snake_case__: List[str] , snake_case__: List[str] ):
UpperCAmelCase__ = index # the number when this node is seen
UpperCAmelCase__ = index # lowest rank node reachable from here
index += 1
stack.append(snake_case__ )
UpperCAmelCase__ = True
for w in g[v]:
if index_of[w] == -1:
UpperCAmelCase__ = strong_connect(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
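            # v roots an SCC: pop the stack down to (and including) v to collect it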
UpperCAmelCase__ = []
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
while w != v:
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
components.append(snake_case__ )
return index
UpperCAmelCase__ = []
for v in range(snake_case__ ):
if index_of[v] == -1:
strong_connect(snake_case__ , 0 , snake_case__ )
return components
def UpperCamelCase_( snake_case__: Dict , snake_case__: List[Any] ) -> Optional[int]:
UpperCAmelCase__ = [[] for _ in range(snake_case__ )]
for u, v in edges:
g[u].append(snake_case__ )
return g
if __name__ == "__main__":
# Test
_UpperCamelCase = 7
_UpperCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_UpperCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_UpperCamelCase = [(u, v) for u, v in zip(source, target)]
_UpperCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 335 | 1 |
import os
import numpy
import onnx
def UpperCamelCase_( snake_case__: str , snake_case__: Dict ) -> str:
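    # compare two TensorProtos while ignoring their names: blank both names,
    # compare the protos, then restore the original names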
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def UpperCamelCase_( snake_case__: List[str] , snake_case__: List[Any] , snake_case__: Dict ) -> Optional[int]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(snake_case__ , snake_case__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , snake_case__ , snake_case__ )
_graph_replace_input_with(node_proto.attribute[1].g , snake_case__ , snake_case__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , snake_case__ , snake_case__ )
def UpperCamelCase_( snake_case__: str , snake_case__: Any , snake_case__: Optional[Any] ) -> str:
for n in graph_proto.node:
_node_replace_input_with(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Optional[int] , snake_case__: Optional[Any] ) -> List[Any]:
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , snake_case__ , snake_case__ )
def UpperCamelCase_( snake_case__: List[str] ) -> Optional[Any]:
UpperCAmelCase__ = os.path.dirname(snake_case__ )
UpperCAmelCase__ = os.path.basename(snake_case__ )
UpperCAmelCase__ = onnx.load(os.path.join(snake_case__ , snake_case__ ) )
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = set()
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for i in range(len(snake_case__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(snake_case__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(snake_case__ )
dup_set.add(snake_case__ )
UpperCAmelCase__ = inits[j].data_type
UpperCAmelCase__ = numpy.prod(inits[j].dims )
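                # ONNX TensorProto element types: 1 = FLOAT and 6 = INT32 are 4 bytes,
                # 7 = INT64 and 11 = DOUBLE are 8 bytes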
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , snake_case__ )
total_reduced_size += mem_size
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(snake_case__ )
else:
UpperCAmelCase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 10_24 / 10_24 / 10_24 , 'GB' )
UpperCAmelCase__ = sorted(snake_case__ )
_remove_dup_initializers_from_model(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ = 'optimized_' + model_file_name
UpperCAmelCase__ = os.path.join(snake_case__ , snake_case__ )
onnx.save(snake_case__ , snake_case__ )
return new_model
| 335 |
from ...configuration_utils import PretrainedConfig
_UpperCamelCase = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """tapas"""
def __init__(self , __a=30522 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1024 , __a=[3, 256, 256, 2, 256, 256, 10] , __a=0.02 , __a=1E-1_2 , __a=0 , __a=10.0 , __a=0 , __a=1.0 , __a=None , __a=1.0 , __a=False , __a=None , __a=1.0 , __a=1.0 , __a=False , __a=False , __a="ratio" , __a=None , __a=None , __a=64 , __a=32 , __a=False , __a=True , __a=False , __a=False , __a=True , __a=False , __a=None , __a=None , **__a , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__a , **__a )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_sizes
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase__ = positive_label_weight
UpperCAmelCase__ = num_aggregation_labels
UpperCAmelCase__ = aggregation_loss_weight
UpperCAmelCase__ = use_answer_as_supervision
UpperCAmelCase__ = answer_loss_importance
UpperCAmelCase__ = use_normalized_answer_loss
UpperCAmelCase__ = huber_loss_delta
UpperCAmelCase__ = temperature
UpperCAmelCase__ = aggregation_temperature
UpperCAmelCase__ = use_gumbel_for_cells
UpperCAmelCase__ = use_gumbel_for_aggregation
UpperCAmelCase__ = average_approximation_function
UpperCAmelCase__ = cell_selection_preference
UpperCAmelCase__ = answer_loss_cutoff
UpperCAmelCase__ = max_num_rows
UpperCAmelCase__ = max_num_columns
UpperCAmelCase__ = average_logits_per_cell
UpperCAmelCase__ = select_one_column
UpperCAmelCase__ = allow_empty_column_selection
UpperCAmelCase__ = init_cell_selection_weights_to_zero
UpperCAmelCase__ = reset_position_index_per_cell
UpperCAmelCase__ = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase__ = aggregation_labels
UpperCAmelCase__ = no_aggregation_label_index
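        # a config round-tripped through JSON has string keys here; cast them back to int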
if isinstance(self.aggregation_labels , __a ):
UpperCAmelCase__ = {int(__a ): v for k, v in aggregation_labels.items()}
| 335 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=64 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> int:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = embedding_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = MegatronBertModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , attention_mask=__a , token_type_ids=__a )
UpperCAmelCase__ = model(__a , token_type_ids=__a )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = MegatronBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = MegatronBertForCausalLM(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = MegatronBertForNextSentencePrediction(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = MegatronBertForPreTraining(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = MegatronBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = MegatronBertForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = MegatronBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = MegatronBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
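        # tile every input across the choice dimension: (batch, seq) -> (batch, num_choices, seq)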
UpperCAmelCase__ = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
# test_resize_embeddings = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self , __a , __a , __a=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = super()._prepare_for_class(__a , __a , return_labels=__a )
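        # head classes that require extra label inputs get zero-filled dummy labels of the right shape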
if return_labels:
if model_class in get_values(__a ):
UpperCAmelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
UpperCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = MegatronBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__a )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__a )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__a )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__a )
def _long_tensor(tok_lst):
    # `_long_tensor` is the name the integration test below calls; `torch_device` is
    # assumed to come from transformers.testing_utils, imported in the file header.
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
_UpperCamelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('Model is not available.' )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
UpperCAmelCase__ = os.path.join(os.environ['MYDIR'] , __a )
UpperCAmelCase__ = MegatronBertModel.from_pretrained(__a )
model.to(__a )
model.half()
UpperCAmelCase__ = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
UpperCAmelCase__ = output[0, ii, jj]
UpperCAmelCase__ = expected[3 * ii + jj]
UpperCAmelCase__ = 'ii={} jj={} a={} b={}'.format(__a , __a , __a , __a )
self.assertTrue(math.isclose(__a , __a , rel_tol=__a , abs_tol=__a ) , msg=__a )
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase__ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase__ = model_name.find('patch' )
UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
UpperCAmelCase__ = 12
UpperCAmelCase__ = 10_24
UpperCAmelCase__ = 40_96
UpperCAmelCase__ = 16
UpperCAmelCase__ = 24
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = 3_36
UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
return config
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCAmelCase__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCAmelCase__ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase__ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(snake_case__ )
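        # fused q/k/v projections ("attn.in_proj") are split into thirds along the first dimension below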
if "attn.in_proj" in key:
UpperCAmelCase__ = key.split('.' )
if key.startswith('visual' ):
UpperCAmelCase__ = key_split[3]
UpperCAmelCase__ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[
:dim
]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = rename_key(snake_case__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase__ = val.T
UpperCAmelCase__ = val
return orig_state_dict
def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]:
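    # pick the dummy spaghetti video on the hub whose frame count matches the model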
if num_frames == 8:
UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCAmelCase__ = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy'
UpperCAmelCase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , )
UpperCAmelCase__ = np.load(snake_case__ )
return list(snake_case__ )
def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]:
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCAmelCase__ = model_to_url[model_name]
UpperCAmelCase__ = 8
if "16-frames" in model_name:
UpperCAmelCase__ = 16
elif "shot" in model_name:
UpperCAmelCase__ = 32
UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
model.eval()
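    # Google Drive checkpoints have to be fetched with gdown; the GitHub-hosted ones can use torch.hub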
if "drive" in checkpoint_url:
UpperCAmelCase__ = 'pytorch_model.bin'
gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model']
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase__ = prepare_video(snake_case__ )
UpperCAmelCase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ = model(**snake_case__ )
# Verify outputs
UpperCAmelCase__ = outputs.logits_per_video
UpperCAmelCase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , snake_case__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(snake_case__ , organization='nielsr' )
processor.push_to_hub(snake_case__ , organization='nielsr' )
slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: List[Any] , snake_case__: Union[str, Any] ) -> Tuple:
UpperCAmelCase__ = OmegaConf.load(snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
UpperCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'first_stage_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
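    # build the diffusers modules from the original LDM config and load the extracted weights into them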
UpperCAmelCase__ = config.model.params.first_stage_config.params
UpperCAmelCase__ = config.model.params.unet_config.params
UpperCAmelCase__ = VQModel(**snake_case__ ).eval()
vqvae.load_state_dict(snake_case__ )
UpperCAmelCase__ = UNetLDMModel(**snake_case__ ).eval()
unet.load_state_dict(snake_case__ )
UpperCAmelCase__ = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , )  # DDIMScheduler takes `num_train_timesteps`, not `timesteps`
UpperCAmelCase__ = LDMPipeline(snake_case__ , snake_case__ , snake_case__ )
pipeline.save_pretrained(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
_UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 335 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
def __init__(self , __a , __a ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(unet=__a , scheduler=__a )
@torch.no_grad()
def __call__(self , __a = 1 , __a = 50 , __a = None , __a = "pil" , __a = True , **__a , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
UpperCAmelCase__ = self.unet.config.sample_size
UpperCAmelCase__ = (batch_size, 3, img_size, img_size)
UpperCAmelCase__ = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCAmelCase__ = randn_tensor(__a , generator=__a , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__a )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCAmelCase__ = self.scheduler.schedule[t]
UpperCAmelCase__ = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCAmelCase__ , UpperCAmelCase__ = self.scheduler.add_noise_to_input(__a , __a , generator=__a )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase__ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCAmelCase__ = self.scheduler.step(__a , __a , __a , __a )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase__ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCAmelCase__ = self.scheduler.step_correct(
__a , __a , __a , __a , step_output.prev_sample , step_output['derivative'] , )
UpperCAmelCase__ = step_output.prev_sample
UpperCAmelCase__ = (sample / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
| 335 |
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
import d4rl  # noqa  (registers the D4RL offline-RL environments, e.g. hopper-medium-v2, with gym)
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_UpperCamelCase = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
_UpperCamelCase = '''hopper-medium-v2'''
_UpperCamelCase = gym.make(env_name)
_UpperCamelCase = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
_UpperCamelCase = env.reset()
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = 1000
_UpperCamelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCamelCase = pipeline(obs, planning_horizon=32)
# execute action in environment
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = env.step(denorm_actions)
_UpperCamelCase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
_UpperCamelCase = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 335 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """sew-d"""
def __init__(self , __a=32 , __a=768 , __a=12 , __a=12 , __a=3072 , __a=2 , __a=512 , __a=256 , __a=True , __a=True , __a=("p2c", "c2p") , __a="layer_norm" , __a="gelu_python" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.02 , __a=1E-7 , __a=1E-5 , __a="group" , __a="gelu" , __a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a=False , __a=128 , __a=16 , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="mean" , __a=False , __a=False , __a=256 , __a=0 , __a=1 , __a=2 , **__a , ) -> str:
"""simple docstring"""
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = feat_extract_norm
UpperCAmelCase__ = feat_extract_activation
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = conv_bias
UpperCAmelCase__ = num_conv_pos_embeddings
UpperCAmelCase__ = num_conv_pos_embedding_groups
UpperCAmelCase__ = len(self.conv_dim )
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = squeeze_factor
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = position_buckets
UpperCAmelCase__ = share_att_key
UpperCAmelCase__ = relative_attention
UpperCAmelCase__ = norm_rel_ebd
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = feat_proj_dropout
UpperCAmelCase__ = final_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = feature_layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = apply_spec_augment
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ = ctc_loss_reduction
UpperCAmelCase__ = ctc_zero_infinity
# sequence classification
UpperCAmelCase__ = use_weighted_layer_sum
UpperCAmelCase__ = classifier_proj_size
@property
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 335 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=__a , )
assert hasattr(self , 'env' )
def UpperCamelCase__ (self , __a ) -> int:
"""simple docstring"""
UpperCAmelCase__ = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
UpperCAmelCase__ = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__a , instance_count=__a , instance_type=self.instance_type , debugger_hook_config=__a , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__a , py_version='py36' , )
def UpperCamelCase__ (self , __a ) -> str:
"""simple docstring"""
TrainingJobAnalytics(__a ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(2,)] )
def UpperCamelCase__ (self , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.create_estimator(__a )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __a )
| 335 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    # `rename_state_dict_key` is the name used at the call site in convert_pegasus below;
    # reassigning `k` on every pattern makes the replacements actually accumulate.
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def UpperCamelCase_( snake_case__: dict , snake_case__: dict ) -> PegasusForConditionalGeneration:
UpperCAmelCase__ = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
UpperCAmelCase__ = PegasusConfig(**snake_case__ )
UpperCAmelCase__ = PegasusForConditionalGeneration(snake_case__ )
UpperCAmelCase__ = torch_model.model.state_dict()
UpperCAmelCase__ = {}
for k, v in tf_weights.items():
UpperCAmelCase__ = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
UpperCAmelCase__ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = mapping['shared.weight']
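    # bias parameters that exist in the torch model but not in the tf checkpoint are zero-initialized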
UpperCAmelCase__ = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
UpperCAmelCase__ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def UpperCamelCase_( snake_case__: int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
UpperCAmelCase__ = tf.train.list_variables(snake_case__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
UpperCAmelCase__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase__ = tf.train.load_variable(snake_case__ , snake_case__ )
UpperCAmelCase__ = array
return tf_weights
def UpperCamelCase_( snake_case__: str , snake_case__: str ) -> Optional[Any]:
# save tokenizer first
UpperCAmelCase__ = Path(snake_case__ ).parent.name
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
UpperCAmelCase__ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
UpperCAmelCase__ = get_tf_weights_as_numpy(snake_case__ )
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
UpperCAmelCase__ = task_specific_params
UpperCAmelCase__ = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
UpperCAmelCase__ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
UpperCAmelCase__ = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = sd_pipe.prepare_inputs(__a )
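        # replicate the weights and shard the prompt ids across devices so the jitted pipeline can run under pmap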
UpperCAmelCase__ = replicate(__a )
UpperCAmelCase__ = shard(__a )
UpperCAmelCase__ = jax.random.PRNGKey(0 )
UpperCAmelCase__ = jax.random.split(__a , jax.device_count() )
UpperCAmelCase__ = sd_pipe(__a , __a , __a , num_inference_steps=25 , jit=__a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCAmelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase__ = images[0, 253:256, 253:256, -1]
UpperCAmelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase__ = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'stabilityai/stable-diffusion-2'
UpperCAmelCase__ , UpperCAmelCase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(__a , subfolder='scheduler' )
UpperCAmelCase__ , UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
__a , scheduler=__a , revision='bf16' , dtype=jnp.bfloataa , )
UpperCAmelCase__ = scheduler_params
UpperCAmelCase__ = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = sd_pipe.prepare_inputs(__a )
UpperCAmelCase__ = replicate(__a )
UpperCAmelCase__ = shard(__a )
UpperCAmelCase__ = jax.random.PRNGKey(0 )
UpperCAmelCase__ = jax.random.split(__a , jax.device_count() )
UpperCAmelCase__ = sd_pipe(__a , __a , __a , num_inference_steps=25 , jit=__a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCAmelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase__ = images[0, 253:256, 253:256, -1]
UpperCAmelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase__ = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 335 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 13
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = 99
UpperCAmelCase__ = 384
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 37
UpperCAmelCase__ = 'gelu'
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 512
UpperCAmelCase__ = 16
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = 128
UpperCAmelCase__ = 2
UpperCAmelCase__ = 9
UpperCAmelCase__ = 1
UpperCAmelCase__ = None
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel(config=__a )
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = True
if hasattr(__a , 'use_cache' ):
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = self._prepare_for_class(__a , __a )
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = len(model(__a ) )
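        # round-trip through a tf SavedModel and check that hidden states / attentions survive serialization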
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' )
UpperCAmelCase__ = tf.keras.models.load_model(__a )
UpperCAmelCase__ = model(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = outputs['encoder_hidden_states']
UpperCAmelCase__ = outputs['encoder_attentions']
else:
UpperCAmelCase__ = outputs['hidden_states']
UpperCAmelCase__ = outputs['attentions']
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
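        # ConvBERT routes half of the heads to span-based dynamic convolution (head_ratio=2), hence num_attention_heads / 2 below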
def check_decoder_attentions_output(__a ):
UpperCAmelCase__ = len(__a )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a ):
UpperCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = [1, 6, 768]
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
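# Hedged standalone sketch mirroring the integration check above; the checkpoint,
# input and expected shape ([1, 6, 768]) are taken directly from the test.
import tensorflow as tf
from transformers import TFConvBertModel
model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # (1, 6, 768)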
| 335 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=12 , __a=7 , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=512 , __a=0.02 , __a=0 , __a=None , ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = projection_dim
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = bos_token_id
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase__ = input_mask.numpy()
UpperCAmelCase__ , UpperCAmelCase__ = input_mask.shape
UpperCAmelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__a ):
UpperCAmelCase__ = 1
UpperCAmelCase__ = 0
UpperCAmelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase__ (self , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFBlipTextModel(config=__a )
UpperCAmelCase__ = model(__a , attention_mask=__a , training=__a )
UpperCAmelCase__ = model(__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (TFBlipTextModel,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = BlipTextModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = TFBlipTextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self , __a=True ) -> str:
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=__a )
| 335 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , **__a ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def UpperCamelCase__ (self , **__a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase__ = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
UpperCAmelCase__ = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
UpperCAmelCase__ = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase__ = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase__ = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase__ = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
UpperCAmelCase__ = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
UpperCAmelCase__ = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> List[str]:
"""simple docstring"""
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
def UpperCamelCase__ (self , __a , __a=64 , __a = 0 , __a = 512 / 1500 , __a = 32 , __a = 1 , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = load_image(__a )
UpperCAmelCase__ = self.image_processor.size['longest_edge']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCAmelCase__ = self.image_processor(images=__a , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase__ = self.get_inference_context()
with inference_context():
UpperCAmelCase__ = self._ensure_tensor_on_device(__a , device=self.device )
UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
UpperCAmelCase__ = image_embeddings
UpperCAmelCase__ = grid_points.shape[1]
UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                    'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , __a , __a ):
UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase__ = input_labels[:, i : i + points_per_batch]
UpperCAmelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase__ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = model_inputs.pop('input_boxes' )
UpperCAmelCase__ = model_inputs.pop('is_last' )
UpperCAmelCase__ = model_inputs.pop('original_sizes' ).tolist()
UpperCAmelCase__ = model_inputs.pop('reshaped_input_sizes' ).tolist()
UpperCAmelCase__ = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCAmelCase__ = model_outputs['pred_masks']
UpperCAmelCase__ = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCAmelCase__ = model_outputs['iou_scores']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase__ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation(
__a , __a , __a , __a )
UpperCAmelCase__ = defaultdict(__a )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__a )
UpperCAmelCase__ = {}
if output_rle_mask:
UpperCAmelCase__ = rle_mask
if output_bboxes_mask:
UpperCAmelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 335 | 1 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def UpperCamelCase_( snake_case__: Union[str, Any] ) -> Optional[Any]:
if hor == 1_28:
UpperCAmelCase__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
UpperCAmelCase__ = (32, 1_28, 2_56)
UpperCAmelCase__ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
UpperCAmelCase__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
UpperCAmelCase__ = (32, 64, 1_28, 2_56)
UpperCAmelCase__ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
UpperCAmelCase__ = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
UpperCAmelCase__ = model.state_dict()
UpperCAmelCase__ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
UpperCAmelCase__ = UNetaDModel(**snake_case__ )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
UpperCAmelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase__ = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json" , 'w' ) as f:
json.dump(snake_case__ , snake_case__ )
def UpperCamelCase_( ) -> List[str]:
UpperCAmelCase__ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
UpperCAmelCase__ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
UpperCAmelCase__ = model
UpperCAmelCase__ = UNetaDModel(**snake_case__ )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
UpperCAmelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase__ = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(snake_case__ , snake_case__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
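# Hedged follow-up: each directory written above holds a standard diffusers
# model folder (diffusion_pytorch_model.bin + config.json), so it should be
# reloadable directly, e.g.:
# from diffusers import UNet1DModel
# unet = UNet1DModel.from_pretrained('hub/hopper-medium-v2/unet/hor32')
# value_function = UNet1DModel.from_pretrained('hub/hopper-medium-v2/value_function')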
| 335 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__SCREAMING_SNAKE_CASE = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__SCREAMING_SNAKE_CASE = field(
default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __SCREAMING_SNAKE_CASE = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
__SCREAMING_SNAKE_CASE = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__SCREAMING_SNAKE_CASE = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__SCREAMING_SNAKE_CASE = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__SCREAMING_SNAKE_CASE = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Sequence lengths used for training."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Training seed."""} )
__SCREAMING_SNAKE_CASE = field(
default=1024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Sample from the language model's output distribution."""} )
__SCREAMING_SNAKE_CASE = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__SCREAMING_SNAKE_CASE = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__SCREAMING_SNAKE_CASE = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__SCREAMING_SNAKE_CASE = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(
default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(
        default=32768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__SCREAMING_SNAKE_CASE = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
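# Hedged sketch of how these dataclasses are typically consumed. Upstream
# codeparrot calls the first one TrainingArguments; that name is an assumption
# here, since the class names above are placeholders.
# from transformers import HfArgumentParser
# parser = HfArgumentParser(TrainingArguments)
# (train_args,) = parser.parse_args_into_dataclasses()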
| 335 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def UpperCamelCase_( snake_case__: str , snake_case__: str , snake_case__: str , snake_case__: Path , snake_case__: str = None , snake_case__: str = None , snake_case__: str = None , ) -> List[str]:
if config_name_or_path is None:
UpperCAmelCase__ = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
UpperCAmelCase__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
UpperCAmelCase__ = question_encoder_name_or_path
UpperCAmelCase__ = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
UpperCAmelCase__ = RagConfig.from_pretrained(snake_case__ )
UpperCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
UpperCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
UpperCAmelCase__ = gen_config
UpperCAmelCase__ = question_encoder_config
UpperCAmelCase__ = model_class.from_pretrained_question_encoder_generator(
snake_case__ , snake_case__ , config=snake_case__ )
rag_model.save_pretrained(snake_case__ )
# Sanity check.
model_class.from_pretrained(snake_case__ )
# Save tokenizers.
UpperCAmelCase__ = AutoTokenizer.from_pretrained(snake_case__ )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(snake_case__ )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
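# Example invocation (hedged: the checkpoint names below are illustrative; any
# compatible generator / question-encoder pair works):
# python consolidate_rag_checkpoint.py \
#     --model_type rag_sequence \
#     --dest ./rag-sequence-checkpoint \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base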
| 335 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_choices
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained('roberta-base' , from_pt=__a )
UpperCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
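# Hedged standalone sketch of a forward pass with the same checkpoint the slow
# test loads above (from_pt=True mirrors the test's conversion from PyTorch).
from transformers import AutoTokenizer, FlaxRobertaModel
tokenizer = AutoTokenizer.from_pretrained('roberta-base')
model = FlaxRobertaModel.from_pretrained('roberta-base', from_pt=True)
inputs = tokenizer('Hello world', return_tensors='np')
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)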
| 335 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_UpperCamelCase = 50_0000
_UpperCamelCase , _UpperCamelCase = os.path.split(__file__)
_UpperCamelCase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def UpperCamelCase_( snake_case__: datasets.Dataset , **snake_case__: int ) -> Optional[Any]:
UpperCAmelCase__ = dataset.map(**snake_case__ )
@get_duration
def UpperCamelCase_( snake_case__: datasets.Dataset , **snake_case__: Any ) -> Union[str, Any]:
UpperCAmelCase__ = dataset.filter(**snake_case__ )
def UpperCamelCase_( ) -> List[Any]:
UpperCAmelCase__ = {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
UpperCAmelCase__ = generate_example_dataset(
os.path.join(snake_case__ , 'dataset.arrow' ) , snake_case__ , num_examples=snake_case__ )
UpperCAmelCase__ = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=snake_case__ )
def tokenize(snake_case__: Any ):
return tokenizer(examples['text'] )
UpperCAmelCase__ = map(snake_case__ )
UpperCAmelCase__ = map(snake_case__ , batched=snake_case__ )
UpperCAmelCase__ = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='numpy' ):
UpperCAmelCase__ = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='pandas' ):
UpperCAmelCase__ = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='torch' , columns='numbers' ):
UpperCAmelCase__ = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
UpperCAmelCase__ = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
UpperCAmelCase__ = map(snake_case__ , function=snake_case__ , batched=snake_case__ )
UpperCAmelCase__ = filter(snake_case__ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(snake_case__ , 'wb' ) as f:
f.write(json.dumps(snake_case__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
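# Hedged sketch of the `get_duration` decorator imported from the local `utils`
# module above (an assumption; the real helper may differ in details):
# import functools, time
# def get_duration(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         start = time.time()
#         func(*args, **kwargs)
#         return time.time() - start
#     return wrapper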
| 335 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
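# Hedged migration sketch for the deprecation above; the checkpoint name is an
# example:
# from transformers import MobileViTImageProcessor
# processor = MobileViTImageProcessor.from_pretrained('apple/mobilevit-small')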
| 335 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
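# Net effect of the lazy module above: importing the package stays cheap, and
# the torch-backed classes (e.g. AutoformerModel) are only materialized on
# first attribute access.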
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCamelCase = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def UpperCamelCase_( snake_case__: int , snake_case__: Tuple , snake_case__: str=8 ) -> List[str]:
UpperCAmelCase__ = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
UpperCAmelCase__ = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
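# Worked example for the helper above (get_new_h_w upstream): with scale_factor=8
# it maps a pixel size to the movq latent size, rounding up to a whole block:
#   get_new_h_w(768, 768, 8) -> (96, 96)  # 768 // 64 = 12, then 12 * 8
#   get_new_h_w(700, 700, 8) -> (88, 88)  # 700 // 64 = 10 (+1), then 11 * 8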
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , __a , __a , __a , __a , __a , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , movq=__a , )
UpperCAmelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
if latents is None:
UpperCAmelCase__ = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase__ = latents.to(__a )
UpperCAmelCase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ (self , __a , __a , __a , __a , __a=None , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = len(__a ) if isinstance(__a , __a ) else 1
# get prompt text embeddings
UpperCAmelCase__ = self.tokenizer(
__a , padding='max_length' , truncation=__a , max_length=77 , return_attention_mask=__a , add_special_tokens=__a , return_tensors='pt' , )
UpperCAmelCase__ = text_inputs.input_ids
UpperCAmelCase__ = self.tokenizer(__a , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__a , __a ):
UpperCAmelCase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase__ = text_input_ids.to(__a )
UpperCAmelCase__ = text_inputs.attention_mask.to(__a )
UpperCAmelCase__ , UpperCAmelCase__ = self.text_encoder(
input_ids=__a , attention_mask=__a )
UpperCAmelCase__ = prompt_embeds.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = text_encoder_hidden_states.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = text_mask.repeat_interleave(__a , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ = 42
if negative_prompt is None:
UpperCAmelCase__ = [''] * batch_size
elif type(__a ) is not type(__a ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !="
F" {type(__a )}." )
elif isinstance(__a , __a ):
UpperCAmelCase__ = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
UpperCAmelCase__ = negative_prompt
UpperCAmelCase__ = self.tokenizer(
__a , padding='max_length' , max_length=77 , truncation=__a , return_attention_mask=__a , add_special_tokens=__a , return_tensors='pt' , )
UpperCAmelCase__ = uncond_input.input_ids.to(__a )
UpperCAmelCase__ = uncond_input.attention_mask.to(__a )
UpperCAmelCase__ , UpperCAmelCase__ = self.text_encoder(
input_ids=__a , attention_mask=__a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ = negative_prompt_embeds.shape[1]
UpperCAmelCase__ = negative_prompt_embeds.repeat(1 , __a )
UpperCAmelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __a )
UpperCAmelCase__ = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase__ = uncond_text_encoder_hidden_states.repeat(1 , __a , 1 )
UpperCAmelCase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , __a , -1 )
UpperCAmelCase__ = uncond_text_mask.repeat_interleave(__a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCamelCase__ (self , __a=0 ) -> str:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase__ = torch.device(F"cuda:{gpu_id}" )
UpperCAmelCase__ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
def UpperCamelCase__ (self , __a=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase__ = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=__a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__ = cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
if self.safety_checker is not None:
UpperCAmelCase__ , UpperCAmelCase__ = cpu_offload_with_hook(self.safety_checker , __a , prev_module_hook=__a )
# We'll offload the last model manually.
UpperCAmelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__a )
def __call__(self , __a , __a , __a , __a = None , __a = 512 , __a = 512 , __a = 100 , __a = 4.0 , __a = 1 , __a = None , __a = None , __a = "pil" , __a = True , ) -> Tuple:
"""simple docstring"""
if isinstance(__a , __a ):
UpperCAmelCase__ = 1
elif isinstance(__a , __a ):
UpperCAmelCase__ = len(__a )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__a )}" )
UpperCAmelCase__ = self._execution_device
UpperCAmelCase__ = batch_size * num_images_per_prompt
UpperCAmelCase__ = guidance_scale > 1.0
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._encode_prompt(
__a , __a , __a , __a , __a )
if isinstance(__a , __a ):
UpperCAmelCase__ = torch.cat(__a , dim=0 )
if isinstance(__a , __a ):
UpperCAmelCase__ = torch.cat(__a , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ = image_embeds.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = negative_image_embeds.repeat_interleave(__a , dim=0 )
UpperCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=__a )
self.scheduler.set_timesteps(__a , device=__a )
UpperCAmelCase__ = self.scheduler.timesteps
UpperCAmelCase__ = self.unet.config.in_channels
UpperCAmelCase__ , UpperCAmelCase__ = get_new_h_w(__a , __a , self.movq_scale_factor )
# create initial latent
UpperCAmelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __a , __a , __a , self.scheduler , )
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
UpperCAmelCase__ = self.unet(
sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__ = variance_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(
__a , __a , __a , generator=__a , ).prev_sample
# post-processing
UpperCAmelCase__ = self.movq.decode(__a , force_not_quantize=__a )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase__ = image * 0.5 + 0.5
UpperCAmelCase__ = image.clamp(0 , 1 )
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
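# Hedged memory note: the offload hooks defined earlier correspond to diffusers'
# public enable_sequential_cpu_offload / enable_model_cpu_offload methods (an
# assumption from the upstream pipeline), e.g.:
# pipe = KandinskyPipeline.from_pretrained('kandinsky-community/kandinsky-2-1')
# pipe.enable_model_cpu_offload()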
| 335 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCAmelCase__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sgugger/tiny-distilbert-classification'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
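# Hedged standalone sketch mirroring the inference tests above; every argument
# name is taken directly from those tests.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
args = TensorFlowBenchmarkArguments(
    models=['sshleifer/tiny-gpt2'], training=False, inference=True,
    sequence_lengths=[8], batch_sizes=[1], multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)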
| 335 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_UpperCamelCase = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_UpperCamelCase = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def UpperCamelCase__ (self , __a , __a , __a = CHRF.CHAR_ORDER , __a = CHRF.WORD_ORDER , __a = CHRF.BETA , __a = False , __a = False , __a = False , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = len(references[0] )
if any(len(__a ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
UpperCAmelCase__ = [[refs[i] for refs in references] for i in range(__a )]
UpperCAmelCase__ = CHRF(__a , __a , __a , __a , __a , __a )
UpperCAmelCase__ = sb_chrf.corpus_score(__a , __a )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
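        # Minimal sketch of the transposition above (hypothetical data):
        # datasets-style references carry one sub-list per prediction, while
        # sacrebleu's corpus_score wants one list per reference "column".
        #
        #   predictions = ["hello there", "general kenobi"]
        #   references = [["hello there"], ["general kenobi"]]
        #   n = len(references[0])  # 1 reference per prediction
        #   transformed = [[refs[i] for refs in references] for i in range(n)]
        #   transformed == [["hello there", "general kenobi"]]  # True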
| 335 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335 | 1 |
from datetime import datetime
import requests
def UpperCamelCase_( snake_case__: str ) -> bytes:
UpperCAmelCase__ = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
UpperCAmelCase__ = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
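# Note: the downloadgram endpoint and the JSON shape assumed above
# ([{'urls': [{'src': ...}]}]) belong to a third-party service and may change;
# the parsing is best-effort, not a stable API contract.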
if __name__ == "__main__":
_UpperCamelCase = input('''Enter Video/IGTV url: ''').strip()
_UpperCamelCase = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 335 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__(self , *,
__a = 4 , __a = 768 , __a , __a , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.Linear(__a , __a )
# parameters for encoder hidden states
UpperCAmelCase__ = clip_extra_context_tokens
UpperCAmelCase__ = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.LayerNorm(__a )
def UpperCamelCase__ (self , *, __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCAmelCase__ = image_embeddings.shape[0]
UpperCAmelCase__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCAmelCase__ = classifier_free_guidance_embeddings.expand(
__a , -1 )
UpperCAmelCase__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCAmelCase__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCAmelCase__ = self.embedding_proj(__a )
UpperCAmelCase__ = self.clip_image_embeddings_project_to_time_embeddings(__a )
UpperCAmelCase__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCAmelCase__ = self.clip_extra_context_tokens_proj(__a )
UpperCAmelCase__ = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
UpperCAmelCase__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCAmelCase__ = self.encoder_hidden_states_proj(__a )
UpperCAmelCase__ = self.text_encoder_hidden_states_norm(__a )
UpperCAmelCase__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
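        # Shape sketch (hypothetical sizes, assuming clip_extra_context_tokens=4
        # and cross_attention_dim=768): under classifier-free guidance the image
        # embedding batch is doubled to 2*B; the extra-token projection is then
        # reshaped to (2*B, 4, 768) and concatenated in front of the projected
        # text encoder hidden states along the sequence dimension.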
| 335 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def UpperCamelCase_( snake_case__: str , snake_case__: Tuple ) -> Union[str, Any]:
UpperCAmelCase__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
UpperCAmelCase__ = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
UpperCAmelCase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
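    # The mean/std above are the standard CLIP image-normalization constants
    # (0.48145466, 0.4578275, 0.40821073) / (0.26862954, 0.26130258, 0.27577711),
    # matching the transform the released BLIP checkpoints were trained with.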
UpperCAmelCase__ = transform(snake_case__ ).unsqueeze(0 ).to(snake_case__ )
return image
def UpperCamelCase_( snake_case__: int ) -> Dict:
if "visual_encoder" in key:
UpperCAmelCase__ = re.sub('visual_encoder*' , 'vision_model.encoder' , snake_case__ )
if "blocks" in key:
UpperCAmelCase__ = re.sub(r'blocks' , 'layers' , snake_case__ )
if "attn" in key:
UpperCAmelCase__ = re.sub(r'attn' , 'self_attn' , snake_case__ )
if "norm1" in key:
UpperCAmelCase__ = re.sub(r'norm1' , 'layer_norm1' , snake_case__ )
if "norm2" in key:
UpperCAmelCase__ = re.sub(r'norm2' , 'layer_norm2' , snake_case__ )
if "encoder.norm" in key:
UpperCAmelCase__ = re.sub(r'encoder.norm' , 'post_layernorm' , snake_case__ )
if "encoder.patch_embed.proj" in key:
UpperCAmelCase__ = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , snake_case__ )
if "encoder.pos_embed" in key:
UpperCAmelCase__ = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , snake_case__ )
if "encoder.cls_token" in key:
UpperCAmelCase__ = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , snake_case__ )
if "self_attn" in key:
UpperCAmelCase__ = re.sub(r'self_attn.proj' , 'self_attn.projection' , snake_case__ )
return key
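# Illustrative mapping (derived from the substitutions above):
#   rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"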
@torch.no_grad()
def UpperCamelCase_( snake_case__: Tuple , snake_case__: Dict=None ) -> Optional[int]:
if config_path is not None:
UpperCAmelCase__ = BlipConfig.from_pretrained(snake_case__ )
else:
UpperCAmelCase__ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase__ = BlipForConditionalGeneration(snake_case__ ).eval()
UpperCAmelCase__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
UpperCAmelCase__ = blip_decoder(pretrained=snake_case__ , image_size=3_84 , vit='base' )
UpperCAmelCase__ = pt_model.eval()
UpperCAmelCase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase__ = modified_state_dict.pop(snake_case__ )
UpperCAmelCase__ = rename_key(snake_case__ )
UpperCAmelCase__ = value
hf_model.load_state_dict(snake_case__ )
UpperCAmelCase__ = 3_84
UpperCAmelCase__ = load_demo_image(image_size=snake_case__ , device='cpu' )
UpperCAmelCase__ = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCAmelCase__ = tokenizer(['a picture of'] ).input_ids
UpperCAmelCase__ = hf_model.generate(snake_case__ , snake_case__ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase__ = hf_model.generate(snake_case__ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase__ = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
UpperCAmelCase__ = blip_vqa(pretrained=snake_case__ , image_size=snake_case__ , vit='base' )
vqa_model.eval()
UpperCAmelCase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase__ = modified_state_dict.pop(snake_case__ )
UpperCAmelCase__ = rename_key(snake_case__ )
UpperCAmelCase__ = value
UpperCAmelCase__ = BlipForQuestionAnswering(snake_case__ )
hf_vqa_model.load_state_dict(snake_case__ )
UpperCAmelCase__ = ['How many dogs are in this image?']
UpperCAmelCase__ = tokenizer(snake_case__ , return_tensors='pt' ).input_ids
UpperCAmelCase__ = hf_vqa_model.generate(snake_case__ , snake_case__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
UpperCAmelCase__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
UpperCAmelCase__ = blip_itm(pretrained=snake_case__ , image_size=snake_case__ , vit='base' )
itm_model.eval()
UpperCAmelCase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase__ = modified_state_dict.pop(snake_case__ )
UpperCAmelCase__ = rename_key(snake_case__ )
UpperCAmelCase__ = value
UpperCAmelCase__ = BlipForImageTextRetrieval(snake_case__ )
UpperCAmelCase__ = ['A picture of a woman with a dog sitting in a beach']
UpperCAmelCase__ = tokenizer(
snake_case__ , return_tensors='pt' , padding='max_length' , truncation=snake_case__ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case__ )
hf_itm_model.eval()
UpperCAmelCase__ = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ )
UpperCAmelCase__ = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_UpperCamelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 335 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BioGptTokenizer
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
UpperCAmelCase__ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(__a ) )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = 'lower newer'
return input_text, output_text
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ = 'lower'
UpperCAmelCase__ = ['low', 'er</w>']
UpperCAmelCase__ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase__ = tokens + ['<unk>']
UpperCAmelCase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
UpperCAmelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 335 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_( snake_case__: Tuple ) -> Any:
UpperCAmelCase__ = FileLock(str(tmpdir / 'foo.lock' ) )
UpperCAmelCase__ = FileLock(str(tmpdir / 'foo.lock' ) )
UpperCAmelCase__ = 0.0_1
with locka.acquire():
with pytest.raises(snake_case__ ):
UpperCAmelCase__ = time.time()
locka.acquire(snake_case__ )
assert time.time() - _start > timeout
def UpperCamelCase_( snake_case__: List[Any] ) -> Dict:
UpperCAmelCase__ = 'a' * 10_00 + '.lock'
UpperCAmelCase__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(snake_case__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
UpperCAmelCase__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case__ ):
locka.acquire(0 )
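# Minimal FileLock usage sketch (mirrors the behavior tested above):
#
#   with FileLock("work.lock", timeout=1):
#       ...  # critical section; a second acquire on the same path
#            # raises Timeout instead of deadlocking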
| 335 |
class lowercase : # Public class to implement a graph
'''simple docstring'''
    def __init__(self , row , col , graph ) -> None:
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe(self , i , j , visited ) -> bool:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs(self , i , j , visited ) -> None:  # DFS over the 8 neighbours
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands(self ) -> int: # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
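    # Example: the classic 5x5 grid below has 5 islands under 8-directional
    # connectivity.
    #
    #   graph = [[1, 1, 0, 0, 0],
    #            [0, 1, 0, 0, 1],
    #            [1, 0, 0, 1, 1],
    #            [0, 0, 0, 0, 0],
    #            [1, 0, 1, 0, 1]]
    #   lowercase(5, 5, graph).count_islands()  # -> 5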
| 335 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline
__SCREAMING_SNAKE_CASE = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
__SCREAMING_SNAKE_CASE = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
__SCREAMING_SNAKE_CASE = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__SCREAMING_SNAKE_CASE = False
@property
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
return 32
@property
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return 32
@property
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
return 100
@property
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCAmelCase__ = MultilingualCLIP(__a )
UpperCAmelCase__ = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = {
'in_channels': 9,
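            # 9 = 4 noisy latent channels + 4 masked-image latent channels
            # + 1 mask channel, the usual inpainting concatenation (an
            # assumption here, not stated in this test)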
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
UpperCAmelCase__ = UNetaDConditionModel(**__a )
return model
@property
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = self.dummy_tokenizer
UpperCAmelCase__ = self.dummy_unet
UpperCAmelCase__ = self.dummy_movq
UpperCAmelCase__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type='epsilon' , thresholding=__a , )
UpperCAmelCase__ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase__ (self , __a , __a=0 ) -> int:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a ) ).to(__a )
UpperCAmelCase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__a )
# create init_image
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((256, 256) )
# create mask
UpperCAmelCase__ = np.ones((64, 64) , dtype=np.floataa )
UpperCAmelCase__ = 0
if str(__a ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(__a )
else:
UpperCAmelCase__ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase__ = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**__a )
UpperCAmelCase__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(__a ) )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCAmelCase__ = np.ones((768, 768) , dtype=np.floataa )
UpperCAmelCase__ = 0
UpperCAmelCase__ = 'a hat'
UpperCAmelCase__ = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__a )
UpperCAmelCase__ = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
UpperCAmelCase__ = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
UpperCAmelCase__ = pipeline(
__a , image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__a , __a )
| 335 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase = Lock()
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Dict , snake_case__: Any ) -> str:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
UpperCAmelCase__ = []
UpperCAmelCase__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
UpperCAmelCase__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCamelCase_( ) -> Dict:
UpperCAmelCase__ = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
UpperCAmelCase__ = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main()
| 335 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCamelCase_( snake_case__: Tuple ) -> str:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def UpperCamelCase_( snake_case__: str ) -> Dict:
# word like '180' or '身高' or '神'
for char in word:
UpperCAmelCase__ = ord(snake_case__ )
if not _is_chinese_char(snake_case__ ):
return 0
return 1
def UpperCamelCase_( snake_case__: List[str] ) -> str:
UpperCAmelCase__ = set()
for token in tokens:
UpperCAmelCase__ = len(snake_case__ ) > 1 and is_chinese(snake_case__ )
if chinese_word:
word_set.add(snake_case__ )
UpperCAmelCase__ = list(snake_case__ )
return word_list
def UpperCamelCase_( snake_case__: List[str] , snake_case__: set() ) -> List[str]:
if not chinese_word_set:
return bert_tokens
UpperCAmelCase__ = max([len(snake_case__ ) for w in chinese_word_set] )
UpperCAmelCase__ = bert_tokens
UpperCAmelCase__ , UpperCAmelCase__ = 0, len(snake_case__ )
while start < end:
UpperCAmelCase__ = True
if is_chinese(bert_word[start] ):
UpperCAmelCase__ = min(end - start , snake_case__ )
for i in range(snake_case__ , 1 , -1 ):
UpperCAmelCase__ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
UpperCAmelCase__ = '##' + bert_word[j]
UpperCAmelCase__ = start + i
UpperCAmelCase__ = False
break
if single_word:
start += 1
return bert_word
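# Illustrative example (hypothetical tokens): with bert_tokens
# ['身', '高', '180'] and chinese_word_set {'身高'}, the continuation character
# is marked, yielding ['身', '##高', '180'].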
def UpperCamelCase_( snake_case__: List[str] , snake_case__: LTP , snake_case__: BertTokenizer ) -> Dict:
UpperCAmelCase__ = []
for i in range(0 , len(snake_case__ ) , 1_00 ):
UpperCAmelCase__ = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=['cws'] ).cws
UpperCAmelCase__ = [get_chinese_word(snake_case__ ) for r in res]
ltp_res.extend(snake_case__ )
assert len(snake_case__ ) == len(snake_case__ )
UpperCAmelCase__ = []
for i in range(0 , len(snake_case__ ) , 1_00 ):
UpperCAmelCase__ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=snake_case__ , truncation=snake_case__ , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(snake_case__ ) == len(snake_case__ )
UpperCAmelCase__ = []
for input_ids, chinese_word in zip(snake_case__ , snake_case__ ):
UpperCAmelCase__ = []
for id in input_ids:
UpperCAmelCase__ = bert_tokenizer._convert_id_to_token(snake_case__ )
input_tokens.append(snake_case__ )
UpperCAmelCase__ = add_sub_symbol(snake_case__ , snake_case__ )
UpperCAmelCase__ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case__ ):
if token[:2] == "##":
UpperCAmelCase__ = token[2:]
# save chinese tokens' pos
if len(snake_case__ ) == 1 and _is_chinese_char(ord(snake_case__ ) ):
ref_id.append(snake_case__ )
ref_ids.append(snake_case__ )
assert len(snake_case__ ) == len(snake_case__ )
return ref_ids
def UpperCamelCase_( snake_case__: str ) -> str:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = [line.strip() for line in data if len(snake_case__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
UpperCAmelCase__ = LTP(args.ltp ) # faster in GPU device
UpperCAmelCase__ = BertTokenizer.from_pretrained(args.bert )
UpperCAmelCase__ = prepare_ref(snake_case__ , snake_case__ , snake_case__ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
UpperCAmelCase__ = [json.dumps(snake_case__ ) + '\n' for ref in ref_ids]
f.writelines(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
_UpperCamelCase = parser.parse_args()
main(args)
| 335 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
'''simple docstring'''
def __init__(self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
UpperCAmelCase__ = 256
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
def UpperCamelCase__ (self , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = cva.imread(__a , 0 )
UpperCAmelCase__ = copy.deepcopy(self.img )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCAmelCase__ = np.sum(__a )
for i in range(len(__a ) ):
UpperCAmelCase__ = x[i] / self.k
self.sk += prk
UpperCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase__ = int(last % last )
UpperCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__a )
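        # The loop above builds the histogram-equalization lookup table:
        # each intensity r_k maps to round((L - 1) * CDF(r_k)), where the CDF
        # accumulates the normalized histogram counts in self.sk.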
UpperCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase__ = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_UpperCamelCase = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 335 | 1 |
from collections import deque
def UpperCamelCase_( snake_case__: Tuple ) -> Tuple:
UpperCAmelCase__ = len(snake_case__ )
UpperCAmelCase__ = deque()
UpperCAmelCase__ = [False for _ in range(snake_case__ )]
UpperCAmelCase__ = [-1 for _ in range(snake_case__ )]
UpperCAmelCase__ = index_of[:]
def strong_connect(snake_case__: List[str] , snake_case__: List[str] , snake_case__: List[str] ):
UpperCAmelCase__ = index # the number when this node is seen
UpperCAmelCase__ = index # lowest rank node reachable from here
index += 1
stack.append(snake_case__ )
UpperCAmelCase__ = True
for w in g[v]:
if index_of[w] == -1:
UpperCAmelCase__ = strong_connect(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
UpperCAmelCase__ = []
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
while w != v:
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
components.append(snake_case__ )
return index
UpperCAmelCase__ = []
for v in range(snake_case__ ):
if index_of[v] == -1:
strong_connect(snake_case__ , 0 , snake_case__ )
return components
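# Complexity note: tarjan runs in O(V + E): every vertex is pushed onto and
# popped off the stack exactly once, and strong_connect walks each edge once.
# Components are emitted in reverse topological order of the condensation graph.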
def UpperCamelCase_( snake_case__: Dict , snake_case__: List[Any] ) -> Optional[int]:
UpperCAmelCase__ = [[] for _ in range(snake_case__ )]
for u, v in edges:
g[u].append(snake_case__ )
return g
if __name__ == "__main__":
# Test
_UpperCamelCase = 7
_UpperCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_UpperCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_UpperCamelCase = [(u, v) for u, v in zip(source, target)]
_UpperCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 335 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> str:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
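        # With len(depths) stages, patch merging halves the grid after each of
        # the first len(depths) - 1, so the final map has
        # (image_size // patch_size) ** 2 // 4 ** (len(depths) - 1) tokens of
        # dim embed_dim * 2 ** (len(depths) - 1): here (32 // 2) ** 2 // 16 = 16
        # tokens of dim 16 * 4 = 64.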
def UpperCamelCase__ (self , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = SwinvaForMaskedImageModeling(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = SwinvaForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , embed_dim=37 )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
UpperCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = config.window_size**2
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase__ = len(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
UpperCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape
UpperCAmelCase__ = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = SwinvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = _config_zero_init(__a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__a )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__a )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase__ = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 335 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowercase :
'''simple docstring'''
def __init__(self , __a , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 13
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = 99
UpperCAmelCase__ = 32
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 37
UpperCAmelCase__ = 'gelu'
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 512
UpperCAmelCase__ = 16
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = None
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFEsmModel(config=__a )
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a , __a , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = TFEsmModel(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__a , encoder_hidden_states=__a )
# Also check the case where encoder outputs are not passed
UpperCAmelCase__ = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFEsmForMaskedLM(config=__a )
UpperCAmelCase__ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFEsmForTokenClassification(config=__a )
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFEsmModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = TFEsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCAmelCase__ = model.get_bias()
assert isinstance(__a , __a )
for k, v in name.items():
assert isinstance(__a , tf.Variable )
else:
UpperCAmelCase__ = model.get_output_embeddings()
assert x is None
UpperCAmelCase__ = model.get_bias()
assert name is None
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __a )
# compare the actual values for a slice.
UpperCAmelCase__ = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
UpperCAmelCase__ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase__ = model(__a )[0]
# compare the actual values for a slice.
UpperCAmelCase__ = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 335 |
from collections import deque
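# Tarjan's strongly-connected-components algorithm: a single depth-first
# search gives every vertex a discovery index and tracks the lowest index
# reachable from it (its "lowlink"); whenever lowlink == index, the vertices
# currently on the stack form one strongly connected component. Runs in
# O(V + E).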
def UpperCamelCase_( snake_case__: Tuple ) -> Tuple:
UpperCAmelCase__ = len(snake_case__ )
UpperCAmelCase__ = deque()
UpperCAmelCase__ = [False for _ in range(snake_case__ )]
UpperCAmelCase__ = [-1 for _ in range(snake_case__ )]
UpperCAmelCase__ = index_of[:]
def strong_connect(snake_case__: List[str] , snake_case__: List[str] , snake_case__: List[str] ):
UpperCAmelCase__ = index # the number when this node is seen
UpperCAmelCase__ = index # lowest rank node reachable from here
index += 1
stack.append(snake_case__ )
UpperCAmelCase__ = True
for w in g[v]:
if index_of[w] == -1:
UpperCAmelCase__ = strong_connect(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
UpperCAmelCase__ = []
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
while w != v:
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
components.append(snake_case__ )
return index
UpperCAmelCase__ = []
for v in range(snake_case__ ):
if index_of[v] == -1:
strong_connect(snake_case__ , 0 , snake_case__ )
return components
def UpperCamelCase_( snake_case__: Dict , snake_case__: List[Any] ) -> Optional[int]:
UpperCAmelCase__ = [[] for _ in range(snake_case__ )]
for u, v in edges:
g[u].append(snake_case__ )
return g
if __name__ == "__main__":
# Test
_UpperCamelCase = 7
_UpperCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_UpperCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_UpperCamelCase = [(u, v) for u, v in zip(source, target)]
_UpperCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 335 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
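# SAM-style automatic mask generation: embed the image once, score a grid of
# point prompts in batches, then filter the candidate masks (IoU threshold,
# stability score) and deduplicate them with NMS across crops.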
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , **__a ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def UpperCamelCase__ (self , **__a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase__ = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
UpperCAmelCase__ = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
UpperCAmelCase__ = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase__ = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase__ = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase__ = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
UpperCAmelCase__ = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
UpperCAmelCase__ = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> List[str]:
"""simple docstring"""
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
def UpperCamelCase__ (self , __a , __a=64 , __a = 0 , __a = 512 / 1500 , __a = 32 , __a = 1 , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = load_image(__a )
UpperCAmelCase__ = self.image_processor.size['longest_edge']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCAmelCase__ = self.image_processor(images=__a , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase__ = self.get_inference_context()
with inference_context():
UpperCAmelCase__ = self._ensure_tensor_on_device(__a , device=self.device )
UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
UpperCAmelCase__ = image_embeddings
UpperCAmelCase__ = grid_points.shape[1]
UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
                'To return all points at once, set points_per_batch to None' )
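        # Stream the prompt grid in chunks of `points_per_batch`; `is_last`
        # marks the final chunk so postprocessing knows when to aggregate.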
for i in range(0 , __a , __a ):
UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase__ = input_labels[:, i : i + points_per_batch]
UpperCAmelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase__ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = model_inputs.pop('input_boxes' )
UpperCAmelCase__ = model_inputs.pop('is_last' )
UpperCAmelCase__ = model_inputs.pop('original_sizes' ).tolist()
UpperCAmelCase__ = model_inputs.pop('reshaped_input_sizes' ).tolist()
UpperCAmelCase__ = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCAmelCase__ = model_outputs['pred_masks']
UpperCAmelCase__ = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCAmelCase__ = model_outputs['iou_scores']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase__ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation(
__a , __a , __a , __a )
UpperCAmelCase__ = defaultdict(__a )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__a )
UpperCAmelCase__ = {}
if output_rle_mask:
UpperCAmelCase__ = rle_mask
if output_bboxes_mask:
UpperCAmelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 335 |
from ...configuration_utils import PretrainedConfig
_UpperCamelCase = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """tapas"""
def __init__(self , __a=30522 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1024 , __a=[3, 256, 256, 2, 256, 256, 10] , __a=0.02 , __a=1E-1_2 , __a=0 , __a=10.0 , __a=0 , __a=1.0 , __a=None , __a=1.0 , __a=False , __a=None , __a=1.0 , __a=1.0 , __a=False , __a=False , __a="ratio" , __a=None , __a=None , __a=64 , __a=32 , __a=False , __a=True , __a=False , __a=False , __a=True , __a=False , __a=None , __a=None , **__a , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__a , **__a )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_sizes
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase__ = positive_label_weight
UpperCAmelCase__ = num_aggregation_labels
UpperCAmelCase__ = aggregation_loss_weight
UpperCAmelCase__ = use_answer_as_supervision
UpperCAmelCase__ = answer_loss_importance
UpperCAmelCase__ = use_normalized_answer_loss
UpperCAmelCase__ = huber_loss_delta
UpperCAmelCase__ = temperature
UpperCAmelCase__ = aggregation_temperature
UpperCAmelCase__ = use_gumbel_for_cells
UpperCAmelCase__ = use_gumbel_for_aggregation
UpperCAmelCase__ = average_approximation_function
UpperCAmelCase__ = cell_selection_preference
UpperCAmelCase__ = answer_loss_cutoff
UpperCAmelCase__ = max_num_rows
UpperCAmelCase__ = max_num_columns
UpperCAmelCase__ = average_logits_per_cell
UpperCAmelCase__ = select_one_column
UpperCAmelCase__ = allow_empty_column_selection
UpperCAmelCase__ = init_cell_selection_weights_to_zero
UpperCAmelCase__ = reset_position_index_per_cell
UpperCAmelCase__ = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase__ = aggregation_labels
UpperCAmelCase__ = no_aggregation_label_index
if isinstance(self.aggregation_labels , __a ):
UpperCAmelCase__ = {int(__a ): v for k, v in aggregation_labels.items()}
| 335 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__(self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = 1 / 255 , __a = True , __a = None , __a = None , __a = True , **__a , ) -> None:
"""simple docstring"""
super().__init__(**__a )
UpperCAmelCase__ = size if size is not None else {'height': 384, 'width': 384}
UpperCAmelCase__ = get_size_dict(__a , default_to_square=__a )
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_rescale
UpperCAmelCase__ = rescale_factor
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase__ = do_convert_rgb
def UpperCamelCase__ (self , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase__ = get_size_dict(__a , default_to_square=__a )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
UpperCAmelCase__ = (size['height'], size['width'])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def UpperCamelCase__ (self , __a , __a , __a = None , **__a , ) -> Tuple:
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def UpperCamelCase__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def UpperCamelCase__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image:
"""simple docstring"""
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(__a , default_to_square=__a )
UpperCAmelCase__ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase__ = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase__ = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCAmelCase__ = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_rescale:
UpperCAmelCase__ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
UpperCAmelCase__ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
UpperCAmelCase__ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase__ = BatchFeature(data={'pixel_values': images} , tensor_type=__a )
return encoded_outputs
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
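# Tokenizers and torch are optional dependencies: probe each backend below and
# only register the corresponding classes when it is importable.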
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
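# Optimal merge pattern: greedily merging the two smallest files first
# minimizes the total merge cost (the same argument as building a Huffman
# tree over file sizes). For example, [2, 3, 4] merges as (2+3) and then
# (5+4) for a total cost of 14. Note: the function mutates the list passed in.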
def UpperCamelCase_( snake_case__: list ) -> float:
UpperCAmelCase__ = 0
while len(snake_case__ ) > 1:
UpperCAmelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCAmelCase__ = files.index(min(snake_case__ ) )
temp += files[min_index]
files.pop(snake_case__ )
files.append(snake_case__ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase__ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase__ = model_name.find('patch' )
UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
UpperCAmelCase__ = 12
UpperCAmelCase__ = 10_24
UpperCAmelCase__ = 40_96
UpperCAmelCase__ = 16
UpperCAmelCase__ = 24
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = 3_36
UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
return config
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCAmelCase__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCAmelCase__ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase__ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(snake_case__ )
if "attn.in_proj" in key:
UpperCAmelCase__ = key.split('.' )
if key.startswith('visual' ):
UpperCAmelCase__ = key_split[3]
UpperCAmelCase__ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
                        UpperCAmelCase__ = val[:dim, :]
                        UpperCAmelCase__ = val[dim : dim * 2, :]
                        UpperCAmelCase__ = val[-dim:, :]
                    else:
                        UpperCAmelCase__ = val[:dim]
                        UpperCAmelCase__ = val[dim : dim * 2]
                        UpperCAmelCase__ = val[-dim:]
else:
if "weight" in key:
                        UpperCAmelCase__ = val[:dim, :]
                        UpperCAmelCase__ = val[dim : dim * 2, :]
                        UpperCAmelCase__ = val[-dim:, :]
                    else:
                        UpperCAmelCase__ = val[:dim]
                        UpperCAmelCase__ = val[dim : dim * 2]
                        UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
                    UpperCAmelCase__ = val[dim : dim * 2, :]
                    UpperCAmelCase__ = val[-dim:, :]
                else:
                    UpperCAmelCase__ = val[:dim]
                    UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = rename_key(snake_case__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase__ = val.T
UpperCAmelCase__ = val
return orig_state_dict
def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]:
if num_frames == 8:
UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCAmelCase__ = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy'
UpperCAmelCase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , )
UpperCAmelCase__ = np.load(snake_case__ )
return list(snake_case__ )
def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]:
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCAmelCase__ = model_to_url[model_name]
UpperCAmelCase__ = 8
if "16-frames" in model_name:
UpperCAmelCase__ = 16
elif "shot" in model_name:
UpperCAmelCase__ = 32
UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase__ = 'pytorch_model.bin'
gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model']
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase__ = prepare_video(snake_case__ )
UpperCAmelCase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ = model(**snake_case__ )
# Verify outputs
UpperCAmelCase__ = outputs.logits_per_video
UpperCAmelCase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , snake_case__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(snake_case__ , organization='nielsr' )
processor.push_to_hub(snake_case__ , organization='nielsr' )
slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 335 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_UpperCamelCase = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
_UpperCamelCase = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def UpperCamelCase_( ) -> Dict:
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(snake_case__ , snake_case__ )
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def UpperCamelCase_( ) -> Dict:
UpperCAmelCase__ = 'rougeLsum'
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k] )[k]
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def UpperCamelCase_( ) -> List[str]:
UpperCAmelCase__ = ['rouge1', 'rouge2', 'rougeL']
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__ )
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__ )
assert score_sep == score_no_sep
def UpperCamelCase_( ) -> Any:
UpperCAmelCase__ = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
UpperCAmelCase__ = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ ) == calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ )
def UpperCamelCase_( ) -> Optional[Any]:
UpperCAmelCase__ = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
UpperCAmelCase__ = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=['rougeLsum'] , newline_sep=snake_case__ )['rougeLsum']
UpperCAmelCase__ = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def UpperCamelCase_( ) -> int:
UpperCAmelCase__ = Path('examples/seq2seq/test_data/wmt_en_ro' )
UpperCAmelCase__ = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(snake_case__ , snake_case__ )
UpperCAmelCase__ = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
| 335 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
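# Converts an original CompVis latent-diffusion checkpoint into a diffusers
# LDMPipeline by splitting the combined state dict on its key prefixes
# ("first_stage_model." -> VQModel, "model.diffusion_model." -> UNetLDMModel).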
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: List[Any] , snake_case__: Union[str, Any] ) -> Tuple:
UpperCAmelCase__ = OmegaConf.load(snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
UpperCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'first_stage_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
UpperCAmelCase__ = config.model.params.first_stage_config.params
UpperCAmelCase__ = config.model.params.unet_config.params
UpperCAmelCase__ = VQModel(**snake_case__ ).eval()
vqvae.load_state_dict(snake_case__ )
UpperCAmelCase__ = UNetLDMModel(**snake_case__ ).eval()
unet.load_state_dict(snake_case__ )
    # DDIMScheduler takes `num_train_timesteps` (there is no `timesteps` kwarg)
    UpperCAmelCase__ = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , )
UpperCAmelCase__ = LDMPipeline(snake_case__ , snake_case__ , snake_case__ )
pipeline.save_pretrained(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
_UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 335 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
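# Exercises TPU execution by spawning the shared test script on 8 cores via
# xla_spawn, the same entry point a user would launch from the command line.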
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = inspect.getfile(accelerate.test_utils )
UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
UpperCAmelCase__ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
UpperCAmelCase__ = [sys.executable] + distributed_args
execute_subprocess_async(__a , env=os.environ.copy() )
| 335 |
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
from __future__ import annotations
from fractions import Fraction
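# Project Euler problem 33: find the four non-trivial two-digit "digit
# cancelling" fractions (e.g. 49/98 == 4/8 after naively cancelling the 9s)
# and return the denominator of their product in lowest terms, which is 100.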
def UpperCamelCase_( snake_case__: int , snake_case__: int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCamelCase_( snake_case__: int ) -> list[str]:
UpperCAmelCase__ = []
UpperCAmelCase__ = 11
UpperCAmelCase__ = int('1' + '0' * digit_len )
for num in range(snake_case__ , snake_case__ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(snake_case__ , snake_case__ ):
solutions.append(f"{num}/{den}" )
den += 1
num += 1
UpperCAmelCase__ = 10
return solutions
def UpperCamelCase_( snake_case__: int = 2 ) -> int:
UpperCAmelCase__ = 1.0
for fraction in fraction_list(snake_case__ ):
UpperCAmelCase__ = Fraction(snake_case__ )
result *= frac.denominator / frac.numerator
return int(snake_case__ )
if __name__ == "__main__":
print(solution())
| 335 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """sew-d"""
def __init__(self , __a=32 , __a=768 , __a=12 , __a=12 , __a=3072 , __a=2 , __a=512 , __a=256 , __a=True , __a=True , __a=("p2c", "c2p") , __a="layer_norm" , __a="gelu_python" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.02 , __a=1E-7 , __a=1E-5 , __a="group" , __a="gelu" , __a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a=False , __a=128 , __a=16 , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="mean" , __a=False , __a=False , __a=256 , __a=0 , __a=1 , __a=2 , **__a , ) -> str:
"""simple docstring"""
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = feat_extract_norm
UpperCAmelCase__ = feat_extract_activation
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = conv_bias
UpperCAmelCase__ = num_conv_pos_embeddings
UpperCAmelCase__ = num_conv_pos_embedding_groups
UpperCAmelCase__ = len(self.conv_dim )
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = squeeze_factor
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = position_buckets
UpperCAmelCase__ = share_att_key
UpperCAmelCase__ = relative_attention
UpperCAmelCase__ = norm_rel_ebd
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = feat_proj_dropout
UpperCAmelCase__ = final_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = feature_layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) "
                F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = apply_spec_augment
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ = ctc_loss_reduction
UpperCAmelCase__ = ctc_zero_infinity
# sequence classification
UpperCAmelCase__ = use_weighted_layer_sum
UpperCAmelCase__ = classifier_proj_size
@property
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 335 | 1 |
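# Project Euler problem 20: sum of the digits of 100!. As a smaller check,
# 10! = 3628800 has digit sum 27; solution(100) returns 648.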
def UpperCamelCase_( snake_case__: int ) -> int:
UpperCAmelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def UpperCamelCase_( snake_case__: int ) -> int:
UpperCAmelCase__ = 0
while number > 0:
UpperCAmelCase__ = number % 10
sum_of_digits += last_digit
UpperCAmelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def UpperCamelCase_( snake_case__: int = 1_00 ) -> int:
UpperCAmelCase__ = factorial(snake_case__ )
UpperCAmelCase__ = split_and_add(snake_case__ )
return result
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 335 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
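# Apply the left -> right replacements above to translate a TF/Pegasus
# parameter name into the matching Bart-style Hugging Face key.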
def UpperCamelCase_( snake_case__: int ) -> str:
for pegasus_name, hf_name in PATTERNS:
UpperCAmelCase__ = k.replace(snake_case__ , snake_case__ )
return k
def UpperCamelCase_( snake_case__: dict , snake_case__: dict ) -> PegasusForConditionalGeneration:
UpperCAmelCase__ = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
UpperCAmelCase__ = PegasusConfig(**snake_case__ )
UpperCAmelCase__ = PegasusForConditionalGeneration(snake_case__ )
UpperCAmelCase__ = torch_model.model.state_dict()
UpperCAmelCase__ = {}
for k, v in tf_weights.items():
UpperCAmelCase__ = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
UpperCAmelCase__ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
UpperCAmelCase__ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def UpperCamelCase_( snake_case__: int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
UpperCAmelCase__ = tf.train.list_variables(snake_case__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
UpperCAmelCase__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase__ = tf.train.load_variable(snake_case__ , snake_case__ )
UpperCAmelCase__ = array
return tf_weights
def UpperCamelCase_( snake_case__: str , snake_case__: str ) -> Optional[Any]:
# save tokenizer first
UpperCAmelCase__ = Path(snake_case__ ).parent.name
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
UpperCAmelCase__ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
UpperCAmelCase__ = get_tf_weights_as_numpy(snake_case__ )
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
UpperCAmelCase__ = task_specific_params
UpperCAmelCase__ = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
UpperCAmelCase__ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
| 335 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 13
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = 99
UpperCAmelCase__ = 384
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 37
UpperCAmelCase__ = 'gelu'
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 512
UpperCAmelCase__ = 16
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = 128
UpperCAmelCase__ = 2
UpperCAmelCase__ = 9
UpperCAmelCase__ = 1
UpperCAmelCase__ = None
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel(config=__a )
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a )
UpperCAmelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = True
if hasattr(__a , 'use_cache' ):
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = self._prepare_for_class(__a , __a )
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' )
UpperCAmelCase__ = tf.keras.models.load_model(__a )
UpperCAmelCase__ = model(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = outputs['encoder_hidden_states']
UpperCAmelCase__ = outputs['encoder_attentions']
else:
UpperCAmelCase__ = outputs['hidden_states']
UpperCAmelCase__ = outputs['attentions']
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
def check_decoder_attentions_output(__a ):
UpperCAmelCase__ = len(__a )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a ):
UpperCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = [1, 6, 768]
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 335 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
| 335 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , **__a ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def UpperCamelCase__ (self , **__a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase__ = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
UpperCAmelCase__ = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
UpperCAmelCase__ = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase__ = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase__ = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase__ = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
UpperCAmelCase__ = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
UpperCAmelCase__ = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
UpperCAmelCase__ = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
UpperCAmelCase__ = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> List[str]:
"""simple docstring"""
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
def UpperCamelCase__ (self , __a , __a=64 , __a = 0 , __a = 512 / 1500 , __a = 32 , __a = 1 , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = load_image(__a )
UpperCAmelCase__ = self.image_processor.size['longest_edge']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCAmelCase__ = self.image_processor(images=__a , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase__ = self.get_inference_context()
with inference_context():
UpperCAmelCase__ = self._ensure_tensor_on_device(__a , device=self.device )
UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
UpperCAmelCase__ = image_embeddings
UpperCAmelCase__ = grid_points.shape[1]
UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
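        # Feed the point grid to the model in chunks of at most points_per_batch prompts.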
for i in range(0 , __a , __a ):
UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase__ = input_labels[:, i : i + points_per_batch]
UpperCAmelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase__ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = model_inputs.pop('input_boxes' )
UpperCAmelCase__ = model_inputs.pop('is_last' )
UpperCAmelCase__ = model_inputs.pop('original_sizes' ).tolist()
UpperCAmelCase__ = model_inputs.pop('reshaped_input_sizes' ).tolist()
UpperCAmelCase__ = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCAmelCase__ = model_outputs['pred_masks']
UpperCAmelCase__ = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCAmelCase__ = model_outputs['iou_scores']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase__ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ = torch.cat(__a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation(
__a , __a , __a , __a )
UpperCAmelCase__ = defaultdict(__a )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__a )
UpperCAmelCase__ = {}
if output_rle_mask:
UpperCAmelCase__ = rle_mask
if output_bboxes_mask:
UpperCAmelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 335 | 1 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
'''simple docstring'''
def __init__(self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
UpperCAmelCase__ = 256
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
def UpperCamelCase__ (self , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = cva.imread(__a , 0 )
UpperCAmelCase__ = copy.deepcopy(self.img )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCAmelCase__ = np.sum(__a )
for i in range(len(__a ) ):
UpperCAmelCase__ = x[i] / self.k
self.sk += prk
UpperCAmelCase__ = (self.L - 1) * self.sk
            UpperCAmelCase__ = last % 1  # fractional part of the stretched level, used for rounding
            UpperCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__a )
UpperCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase__ = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _UpperCamelCase = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 335 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__SCREAMING_SNAKE_CASE = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__SCREAMING_SNAKE_CASE = field(
default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __SCREAMING_SNAKE_CASE = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
__SCREAMING_SNAKE_CASE = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__SCREAMING_SNAKE_CASE = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__SCREAMING_SNAKE_CASE = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__SCREAMING_SNAKE_CASE = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Sequence lengths used for training."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Training seed."""} )
__SCREAMING_SNAKE_CASE = field(
default=1024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Sample from the language model's output distribution."""} )
__SCREAMING_SNAKE_CASE = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__SCREAMING_SNAKE_CASE = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__SCREAMING_SNAKE_CASE = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__SCREAMING_SNAKE_CASE = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(
default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(
        default=32768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__SCREAMING_SNAKE_CASE = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
| 335 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_choices
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained('roberta-base' , from_pt=__a )
UpperCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
| 335 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCamelCase = logging.get_logger(__name__)
def UpperCamelCase_( snake_case__: Dict ) -> Any:
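    # Rewrite indexed PyTorch parameter names like "layers.0" to Flax's "layers_0" style.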
UpperCAmelCase__ = r'\w+[.]\d+'
UpperCAmelCase__ = re.findall(snake_case__ , snake_case__ )
for pat in pats:
UpperCAmelCase__ = key.replace(snake_case__ , '_'.join(pat.split('.' ) ) )
return key
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: List[Any] , snake_case__: Union[str, Any] ) -> Tuple:
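    # Rename a PyTorch weight to its Flax counterpart, transposing kernels where the layouts differ.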
UpperCAmelCase__ = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCAmelCase__ = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCAmelCase__ = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCAmelCase__ = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase__ = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCAmelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase__ = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
UpperCAmelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase__ = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase__ = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: List[str]=42 ) -> Optional[Any]:
# Step 1: Convert pytorch tensor to numpy
UpperCAmelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCAmelCase__ = flax_model.init_weights(PRNGKey(snake_case__ ) )
UpperCAmelCase__ = flatten_dict(snake_case__ )
UpperCAmelCase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase__ = rename_key(snake_case__ )
UpperCAmelCase__ = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
UpperCAmelCase__ , UpperCAmelCase__ = rename_key_and_reshape_tensor(snake_case__ , snake_case__ , snake_case__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
| 335 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
| 335 | 1 |
_UpperCamelCase = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 335 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
import argparse
import struct
import unittest
class lowercase :
'''simple docstring'''
def __init__(self , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = data
# Initialize hash values
UpperCAmelCase__ = [
0x6a09_e667,
0xbb67_ae85,
0x3c6e_f372,
0xa54f_f53a,
0x510e_527f,
0x9b05_688c,
0x1f83_d9ab,
0x5be0_cd19,
]
# Initialize round constants
UpperCAmelCase__ = [
0x428a_2f98,
0x7137_4491,
0xb5c0_fbcf,
0xe9b5_dba5,
0x3956_c25b,
0x59f1_11f1,
0x923f_82a4,
0xab1c_5ed5,
0xd807_aa98,
0x1283_5b01,
0x2431_85be,
0x550c_7dc3,
0x72be_5d74,
0x80de_b1fe,
0x9bdc_06a7,
0xc19b_f174,
0xe49b_69c1,
0xefbe_4786,
0x0fc1_9dc6,
0x240c_a1cc,
0x2de9_2c6f,
0x4a74_84aa,
0x5cb0_a9dc,
0x76f9_88da,
0x983e_5152,
0xa831_c66d,
0xb003_27c8,
0xbf59_7fc7,
0xc6e0_0bf3,
0xd5a7_9147,
0x06ca_6351,
0x1429_2967,
0x27b7_0a85,
0x2e1b_2138,
0x4d2c_6dfc,
0x5338_0d13,
0x650a_7354,
0x766a_0abb,
0x81c2_c92e,
0x9272_2c85,
0xa2bf_e8a1,
0xa81a_664b,
0xc24b_8b70,
0xc76c_51a3,
0xd192_e819,
0xd699_0624,
0xf40e_3585,
0x106a_a070,
0x19a4_c116,
0x1e37_6c08,
0x2748_774c,
0x34b0_bcb5,
0x391c_0cb3,
0x4ed8_aa4a,
0x5b9c_ca4f,
0x682e_6ff3,
0x748f_82ee,
0x78a5_636f,
0x84c8_7814,
0x8cc7_0208,
0x90be_fffa,
0xa450_6ceb,
0xbef9_a3f7,
0xc671_78f2,
]
UpperCAmelCase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCamelCase__ (__a ) -> bytes:
"""simple docstring"""
UpperCAmelCase__ = B'\x80' + (B'\x00' * (63 - (len(__a ) + 8) % 64))
UpperCAmelCase__ = struct.pack('>Q' , (len(__a ) * 8) )
return data + padding + big_endian_integer
def UpperCamelCase__ (self ) -> None:
"""simple docstring"""
UpperCAmelCase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCAmelCase__ = list(struct.unpack('>16L' , __a ) )
# add 48 0-ed integers
words += [0] * 48
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCAmelCase__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
UpperCAmelCase__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
UpperCAmelCase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
UpperCAmelCase__ = self.ror(__a , 6 ) ^ self.ror(__a , 11 ) ^ self.ror(__a , 25 )
UpperCAmelCase__ = (e & f) ^ ((~e & 0xffff_ffff) & g)
UpperCAmelCase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
UpperCAmelCase__ = self.ror(__a , 2 ) ^ self.ror(__a , 13 ) ^ self.ror(__a , 22 )
UpperCAmelCase__ = (a & b) ^ (a & c) ^ (b & c)
UpperCAmelCase__ = (sa + maj) % 0x1_0000_0000
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
                    ((tempa + tempb) % 0x1_0000_0000),
)
UpperCAmelCase__ = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCAmelCase__ = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
        UpperCAmelCase__ = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def UpperCamelCase__ (self , __a , __a ) -> int:
"""simple docstring"""
return 0xffff_ffff & (value << (32 - rotations)) | (value >> rotations)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> None:
"""simple docstring"""
import hashlib
UpperCAmelCase__ = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(__a ).hash , hashlib.shaaaa(__a ).hexdigest() )
def UpperCamelCase_( ) -> None:
import doctest
doctest.testmod()
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
UpperCAmelCase__ = f.read()
else:
UpperCAmelCase__ = bytes(snake_case__ , 'utf-8' )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
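# --- Editor's illustration ---
# Sanity check of the SHA-256 padding rule used in `preprocessing` above:
# message + 0x80 byte + zero padding + 8-byte big-endian bit length,
# totalling a multiple of 64 bytes.
import struct

def _pad_demo(message: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(message) + 8) % 64)
    return message + padding + struct.pack(">Q", len(message) * 8)

assert len(_pad_demo(b"abc")) % 64 == 0
assert len(_pad_demo(b"x" * 55)) == 64 and len(_pad_demo(b"x" * 56)) == 128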
| 335 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCAmelCase__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sgugger/tiny-distilbert-classification'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
| 335 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = GPTSwaTokenizer
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        UpperCAmelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 'This is a test'
UpperCAmelCase__ = 'This is a test'
return input_text, output_text
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = '<s>'
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(__a ) , 2000 )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
        UpperCAmelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB )
UpperCAmelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [465, 287, 265, 631, 842] )
UpperCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
__a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__a )
# fmt: off
self.assertListEqual(
__a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
        UpperCAmelCase__ = GPTSwaTokenizer(SAMPLE_VOCAB )
UpperCAmelCase__ = ['This is a test', 'I was born in 92000, and this is falsé.']
UpperCAmelCase__ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__a , __a ):
self.assertListEqual(tokenizer.encode_fast(__a ) , __a )
# Test that decode_fast returns the input text
for text, token_ids in zip(__a , __a ):
self.assertEqual(tokenizer.decode_fast(__a ) , __a )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
UpperCAmelCase__ = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='AI-Sweden/gpt-sw3-126m' , sequences=__a , )
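# --- Editor's illustration (real class name with digits restored; Hub access assumed) ---
# Byte fallback in action: characters absent from the vocabulary are encoded as
# <0xNN> byte tokens instead of <unk>, so decoding round-trips losslessly.
from transformers import GPTSw3Tokenizer

tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tok("falsé")["input_ids"]
print(tok.decode(ids))  # 'falsé' — the é survives via <0xC3><0xA9> byte tokens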
| 335 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
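# --- Editor's illustration (assumes `accelerate` is installed) ---
# One utility re-exported above: `find_executable_batch_size` retries the
# decorated function with a halved batch size on CUDA out-of-memory errors.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders and run the training loop at this size ...

train()  # called without arguments; the decorator supplies batch_size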
| 335 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = get_activation('swish' )
self.assertIsInstance(__a , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = get_activation('silu' )
self.assertIsInstance(__a , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = get_activation('mish' )
self.assertIsInstance(__a , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = get_activation('gelu' )
self.assertIsInstance(__a , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
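# --- Editor's illustration ---
# The saturating behaviour the tests above assert, shown numerically for SiLU:
# silu(x) = x * sigmoid(x), so silu(-100) underflows to 0 and silu(20) ≈ 20.
import torch
from diffusers.models.activations import get_activation

act = get_activation("silu")
print(act(torch.tensor([-100.0, -1.0, 0.0, 20.0])))
# ≈ tensor([ 0.0000, -0.2689,  0.0000, 20.0000])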
| 335 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__(self , *,
__a = 4 , __a = 768 , __a , __a , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.Linear(__a , __a )
# parameters for encoder hidden states
UpperCAmelCase__ = clip_extra_context_tokens
UpperCAmelCase__ = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
UpperCAmelCase__ = nn.Linear(__a , __a )
UpperCAmelCase__ = nn.LayerNorm(__a )
def UpperCamelCase__ (self , *, __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCAmelCase__ = image_embeddings.shape[0]
UpperCAmelCase__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCAmelCase__ = classifier_free_guidance_embeddings.expand(
__a , -1 )
UpperCAmelCase__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCAmelCase__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCAmelCase__ = self.embedding_proj(__a )
UpperCAmelCase__ = self.clip_image_embeddings_project_to_time_embeddings(__a )
UpperCAmelCase__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCAmelCase__ = self.clip_extra_context_tokens_proj(__a )
UpperCAmelCase__ = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
UpperCAmelCase__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCAmelCase__ = self.encoder_hidden_states_proj(__a )
UpperCAmelCase__ = self.text_encoder_hidden_states_norm(__a )
UpperCAmelCase__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
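# --- Editor's illustration (shapes only; values are arbitrary) ---
# Shape walk-through of the extra-context-token projection above: one image
# embedding becomes `clip_extra_context_tokens` sequence positions.
import torch

batch, cross_attention_dim, n_extra = 2, 768, 4
projected = torch.randn(batch, n_extra * cross_attention_dim)  # output of the Linear
tokens = projected.reshape(batch, -1, n_extra).permute(0, 2, 1)
print(tokens.shape)  # torch.Size([2, 4, 768]) -> (batch, tokens, cross_attention_dim)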
| 335 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_UpperCamelCase = '''src/diffusers'''
# Matches is_xxx_available()
_UpperCamelCase = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_UpperCamelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
_UpperCamelCase = '''
{0} = None
'''
_UpperCamelCase = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
_UpperCamelCase = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def UpperCamelCase_( snake_case__: List[str] ) -> Dict:
UpperCAmelCase__ = _re_backend.findall(snake_case__ )
if len(snake_case__ ) == 0:
return None
return "_and_".join(snake_case__ )
def UpperCamelCase_( ) -> Optional[int]:
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase__ = 0
UpperCAmelCase__ = {}
# Go through the end of the file
while line_index < len(snake_case__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCAmelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(snake_case__ ) and len(lines[line_index] ) > 1:
UpperCAmelCase__ = lines[line_index]
UpperCAmelCase__ = _re_single_line_import.search(snake_case__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(snake_case__ ) > 0:
UpperCAmelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Union[str, Any]:
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def UpperCamelCase_( snake_case__: Any=None ) -> Tuple:
if backend_specific_objects is None:
UpperCAmelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCAmelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase__ = '[' + ', '.join(f"\"{b}\"" for b in backend.split('_and_' ) ) + ']'
UpperCAmelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(snake_case__ , snake_case__ ) for o in objects] )
UpperCAmelCase__ = dummy_file
return dummy_files
def UpperCamelCase_( snake_case__: List[Any]=False ) -> Optional[int]:
UpperCAmelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCAmelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
    UpperCAmelCase__ = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
UpperCAmelCase__ = {
backend: os.path.join(snake_case__ , f"dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(snake_case__ ):
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.read()
else:
UpperCAmelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"Updating diffusers.utils.dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py as the main "
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f"diffusers.utils.dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py. Run `make fix-copies` "
'to fix this.' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_UpperCamelCase = parser.parse_args()
check_dummies(args.fix_and_overwrite)
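# --- Editor's illustration ---
# What the DUMMY_CLASS template above expands to for a torch-only class:
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#
# Importing such a dummy raises a helpful "backend missing" error instead of
# an opaque ImportError when `torch` is not installed.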
| 335 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BioGptTokenizer
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
UpperCAmelCase__ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(__a ) )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = 'lower newer'
return input_text, output_text
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ = 'lower'
UpperCAmelCase__ = ['low', 'er</w>']
UpperCAmelCase__ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase__ = tokens + ['<unk>']
UpperCAmelCase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
UpperCAmelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 335 | 1 |
import numpy as np
import datasets
_UpperCamelCase = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_UpperCamelCase = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_UpperCamelCase = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
} ) , )
def UpperCamelCase__ (self , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
        UpperCAmelCase__ = np.array(X )
        UpperCAmelCase__ = np.array(reference_distribution )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('Expected `X` to be a 2D vector' )
if len(reference_distribution.shape ) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
# Get mahalanobis distance for each prediction
        UpperCAmelCase__ = X - np.mean(reference_distribution )
UpperCAmelCase__ = np.cov(reference_distribution.T )
try:
UpperCAmelCase__ = np.linalg.inv(__a )
except np.linalg.LinAlgError:
UpperCAmelCase__ = np.linalg.pinv(__a )
UpperCAmelCase__ = np.dot(__a , __a )
UpperCAmelCase__ = np.dot(__a , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 335 |
class lowercase : # Public class to implement a graph
'''simple docstring'''
def __init__(self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = row
UpperCAmelCase__ = col
UpperCAmelCase__ = graph
def UpperCamelCase__ (self , __a , __a , __a ) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCamelCase__ (self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCAmelCase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCAmelCase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __a )
def UpperCamelCase__ (self ) -> int: # And finally, count all islands.
"""simple docstring"""
UpperCAmelCase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCAmelCase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__a , __a , __a )
count += 1
return count
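# --- Editor's illustration (method names restored hypothetically:
# is_safe / diffs / count_islands correspond to the anonymized defs above) ---
# grid = [
#     [1, 1, 0, 0],
#     [0, 1, 0, 0],
#     [0, 0, 0, 1],
# ]
# lowercase(3, 4, grid).count_islands() -> 2
# The top-left blob is one island under 8-way adjacency; the lone 1 is another.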
| 335 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCamelCase_( snake_case__: List[Any] ) -> Any:
    UpperCAmelCase__ = filter(lambda p : p.requires_grad , model.parameters() )
UpperCAmelCase__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_UpperCamelCase = logging.getLogger(__name__)
def UpperCamelCase_( snake_case__: str , snake_case__: List[str] ) -> Union[str, Any]:
if metric == "rouge2":
UpperCAmelCase__ = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
UpperCAmelCase__ = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
UpperCAmelCase__ = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
UpperCAmelCase__ = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
UpperCAmelCase__ = ModelCheckpoint(
dirpath=snake_case__ , filename=snake_case__ , monitor=f"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: Any ) -> int:
return EarlyStopping(
monitor=f"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=snake_case__ , verbose=snake_case__ , )
class lowercase ( pl.Callback ):
'''simple docstring'''
def UpperCamelCase__ (self , __a , __a ) -> int:
"""simple docstring"""
UpperCAmelCase__ = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
def UpperCamelCase__ (self , __a , __a , __a , __a=True ) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
UpperCAmelCase__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
UpperCAmelCase__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase__ = od / 'test_results.txt'
UpperCAmelCase__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase__ = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
UpperCAmelCase__ = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=__a )
generations_file.parent.mkdir(exist_ok=__a )
with open(__a , 'a+' ) as writer:
for key in sorted(__a ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase__ = metrics[key]
if isinstance(__a , torch.Tensor ):
UpperCAmelCase__ = val.item()
UpperCAmelCase__ = F"{key}: {val:.6f}\n"
writer.write(__a )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(__a )
@rank_zero_only
def UpperCamelCase__ (self , __a , __a ) -> int:
"""simple docstring"""
try:
UpperCAmelCase__ = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase__ = pl_module.model.num_parameters()
UpperCAmelCase__ = count_trainable_parameters(__a )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCamelCase__ (self , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__a , __a , 'test' )
@rank_zero_only
def UpperCamelCase__ (self , __a , __a ) -> str:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 335 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase = Lock()
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Dict , snake_case__: Any ) -> str:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
UpperCAmelCase__ = []
UpperCAmelCase__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
UpperCAmelCase__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCamelCase_( ) -> Dict:
UpperCAmelCase__ = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
UpperCAmelCase__ = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main()
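# --- Editor's illustration ---
# The same odd-even transposition schedule, single-threaded: alternate
# compare-and-swap passes over (even, odd) and (odd, even) index pairs;
# n phases suffice for a list of length n.
def _odd_even_sort(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert _odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]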
| 335 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase_( snake_case__: Dict , snake_case__: Optional[int] ) -> Union[str, Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
UpperCAmelCase__ = flax_key_tuple[:-1] + ('weight',)
        UpperCAmelCase__ = torch.permute(flax_tensor , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ):
# linear layer
UpperCAmelCase__ = flax_key_tuple[:-1] + ('weight',)
UpperCAmelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase__ = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def UpperCamelCase_( snake_case__: str , snake_case__: Dict , snake_case__: Union[str, Any] ) -> str:
if "metadata" in layer:
UpperCAmelCase__ = layer.split('metadata' )
UpperCAmelCase__ = ''.join(split_layer[0] )[:-1]
UpperCAmelCase__ = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
UpperCAmelCase__ = layer.split('kvstore' )
UpperCAmelCase__ = ''.join(split_layer[0] )[:-1]
UpperCAmelCase__ = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
UpperCAmelCase__ = layer.split('/' )
UpperCAmelCase__ = '/'.join(split_layer[:-1] )
UpperCAmelCase__ = (split_layer[-1],)
if "kvstore/path" in layer:
UpperCAmelCase__ = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
UpperCAmelCase__ = 'file'
else:
UpperCAmelCase__ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: Tuple ) -> Union[str, Any]:
UpperCAmelCase__ = rename_keys(snake_case__ )
UpperCAmelCase__ = {}
for k, v in current_block.items():
UpperCAmelCase__ = v
UpperCAmelCase__ = new_current_block
torch.save(snake_case__ , snake_case__ )
def UpperCamelCase_( snake_case__: Dict , snake_case__: Optional[int] , snake_case__: int , snake_case__: Dict , snake_case__: str = WEIGHTS_NAME ) -> List[Any]:
UpperCAmelCase__ = convert_file_size_to_int(snake_case__ )
UpperCAmelCase__ = []
UpperCAmelCase__ = {}
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
UpperCAmelCase__ = serialization.msgpack_restore(fp.read() )['optimizer']['target']
UpperCAmelCase__ = flatten_dict(snake_case__ , sep='/' )
UpperCAmelCase__ = {}
for layer in checkpoint_info.keys():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
if curr_real_layer_name in all_layers:
UpperCAmelCase__ = content
else:
UpperCAmelCase__ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
UpperCAmelCase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        UpperCAmelCase__ = torch.tensor(raw_weights )
UpperCAmelCase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
        UpperCAmelCase__ , UpperCAmelCase__ = rename_base_flax_keys(tuple(key.split('/' ) ) , raw_weights )
UpperCAmelCase__ = '/'.join(snake_case__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
UpperCAmelCase__ = os.path.join(
snake_case__ , weights_name.replace('.bin' , f"-{len(snake_case__ )+1:05d}-of-???.bin" ) )
rename_and_save_block(snake_case__ , snake_case__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
UpperCAmelCase__ = {}
UpperCAmelCase__ = 0
UpperCAmelCase__ = raw_weights.to(getattr(snake_case__ , snake_case__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
UpperCAmelCase__ = os.path.join(snake_case__ , weights_name.replace('.bin' , f"-{len(snake_case__ )+1:05d}-of-???.bin" ) )
rename_and_save_block(snake_case__ , snake_case__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(snake_case__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
for idx, shard in enumerate(snake_case__ ):
UpperCAmelCase__ = weights_name.replace(
'.bin' , f"-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin" ) # len(sharded_state_dicts):05d}
UpperCAmelCase__ = os.path.join(snake_case__ , weights_name.replace('.bin' , f"-{idx+1:05d}-of-???.bin" ) )
os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
UpperCAmelCase__ = shard
for key in shard:
UpperCAmelCase__ = shard_file
# Add the metadata
UpperCAmelCase__ = {'total_size': total_size}
UpperCAmelCase__ = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' ) as f:
UpperCAmelCase__ = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '\n'
f.write(snake_case__ )
return metadata, index
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_UpperCamelCase = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCamelCase_( ) -> Optional[Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
UpperCAmelCase__ = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
UpperCAmelCase__ = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
UpperCAmelCase__ = TaTokenizer.from_pretrained('t5-small' )
UpperCAmelCase__ = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
UpperCAmelCase__ = tokenizer(snake_case__ , return_tensors='pt' ).input_ids
UpperCAmelCase__ = model.generate(snake_case__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
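# --- Editor's illustration ---
# How the "10GB" default above becomes a byte budget inside the sharding loop:
from transformers.utils.hub import convert_file_size_to_int

print(convert_file_size_to_int("10GB"))   # 10_000_000_000 (decimal gigabytes)
print(convert_file_size_to_int("10GiB"))  # 10_737_418_240 (binary gibibytes)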
| 335 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
'''simple docstring'''
def __init__(self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
UpperCAmelCase__ = 256
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
def UpperCamelCase__ (self , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = cva.imread(__a , 0 )
UpperCAmelCase__ = copy.deepcopy(self.img )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCAmelCase__ = np.sum(__a )
for i in range(len(__a ) ):
UpperCAmelCase__ = x[i] / self.k
self.sk += prk
UpperCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase__ = int(last % last )
UpperCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__a )
UpperCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase__ = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_UpperCamelCase = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
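# --- Editor's illustration ---
# The cumulative mapping this class builds: new_level = round((L - 1) * CDF(v)).
import numpy as np

hist = np.array([4.0, 0.0, 0.0, 4.0])       # toy 4-level histogram
cdf = np.cumsum(hist / hist.sum())
print(np.rint((4 - 1) * cdf).astype(int))   # [2 2 2 3]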
| 335 | 1 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: List[Any] , snake_case__: Union[str, Any] ) -> Tuple:
UpperCAmelCase__ = OmegaConf.load(snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
UpperCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'first_stage_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
UpperCAmelCase__ = config.model.params.first_stage_config.params
UpperCAmelCase__ = config.model.params.unet_config.params
UpperCAmelCase__ = VQModel(**snake_case__ ).eval()
vqvae.load_state_dict(snake_case__ )
UpperCAmelCase__ = UNetLDMModel(**snake_case__ ).eval()
unet.load_state_dict(snake_case__ )
UpperCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , )
UpperCAmelCase__ = LDMPipeline(snake_case__ , snake_case__ , snake_case__ )
pipeline.save_pretrained(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
_UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
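# --- Editor's illustration ---
# Typical invocation (script filename hypothetical):
#   python convert_ldm_original_checkpoint.py \
#       --checkpoint_path model.ckpt --config_path config.yaml \
#       --output_path ./ldm-pipeline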
| 335 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> str:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = SwinvaForMaskedImageModeling(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = SwinvaForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , embed_dim=37 )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
UpperCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = config.window_size**2
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase__ = len(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
UpperCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape
UpperCAmelCase__ = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = SwinvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = _config_zero_init(__a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__a )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__a )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase__ = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 335 | 1 |
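The shape assertion in create_and_check_model above relies on Swin-style stage arithmetic: each of the len(depths) - 1 patch-merging steps quarters the token count and doubles the channel width. A minimal sketch of that arithmetic with toy values (image_size=32, patch_size=4, embed_dim=16, three stages; the numbers are illustrative, not the tester's actual configuration):

image_size, patch_size, embed_dim = 32, 4, 16
depths = [1, 2, 1]  # three stages -> two patch-merging steps

expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
print(expected_seq_len, expected_dim)  # 4 64: an 8x8 patch grid quartered twice, width doubled twice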
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase_( ) -> Tuple:
UpperCAmelCase__ = HfArgumentParser(snake_case__ )
UpperCAmelCase__ = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase__ = TensorFlowBenchmark(args=snake_case__ )
try:
UpperCAmelCase__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase__ = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
UpperCAmelCase__ = ' '.join(str(snake_case__ ).split(' ' )[:-1] )
UpperCAmelCase__ = ''
UpperCAmelCase__ = eval(str(snake_case__ ).split(' ' )[-1] )
UpperCAmelCase__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(snake_case__ )
if len(snake_case__ ) > 0:
UpperCAmelCase__ = full_error_msg + begin_error_msg + str(snake_case__ )
raise ValueError(snake_case__ )
benchmark.run()
if __name__ == "__main__":
main()
| 335 |
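The try/except above exists only to turn deprecated --no_<flag> spellings into a clearer ValueError suggesting the newer --no-<flag> form. A standalone sketch of that message-building loop with hypothetical flag names (known_deprecated stands in for TensorFlowBenchmark.deprecated_args):

arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
deprecated = ["--no_cuda", "--bogus_flag"]   # hypothetical parsed flags
known_deprecated = {"no_cuda"}               # stand-in for the real deprecated set

full_error_msg, wrong_args = "", []
for arg in deprecated:
    if arg[2:] in known_deprecated:          # arg[2:] strips the leading '--'
        full_error_msg += arg_error_msg.format(arg[5:])  # arg[5:] strips '--no_'
    else:
        wrong_args.append(arg)
print(full_error_msg, wrong_args)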
from collections import deque
def UpperCamelCase_( snake_case__: Tuple ) -> Tuple:
UpperCAmelCase__ = len(snake_case__ )
UpperCAmelCase__ = deque()
UpperCAmelCase__ = [False for _ in range(snake_case__ )]
UpperCAmelCase__ = [-1 for _ in range(snake_case__ )]
UpperCAmelCase__ = index_of[:]
def strong_connect(snake_case__: List[str] , snake_case__: List[str] , snake_case__: List[str] ):
UpperCAmelCase__ = index # the number when this node is seen
UpperCAmelCase__ = index # lowest rank node reachable from here
index += 1
stack.append(snake_case__ )
UpperCAmelCase__ = True
for w in g[v]:
if index_of[w] == -1:
UpperCAmelCase__ = strong_connect(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
UpperCAmelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
UpperCAmelCase__ = []
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
while w != v:
UpperCAmelCase__ = stack.pop()
UpperCAmelCase__ = False
component.append(snake_case__ )
components.append(snake_case__ )
return index
UpperCAmelCase__ = []
for v in range(snake_case__ ):
if index_of[v] == -1:
strong_connect(snake_case__ , 0 , snake_case__ )
return components
def UpperCamelCase_( snake_case__: Dict , snake_case__: List[Any] ) -> Optional[int]:
UpperCAmelCase__ = [[] for _ in range(snake_case__ )]
for u, v in edges:
g[u].append(snake_case__ )
return g
if __name__ == "__main__":
# Test
_UpperCamelCase = 7
_UpperCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_UpperCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_UpperCamelCase = [(u, v) for u, v in zip(source, target)]
_UpperCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 335 | 1 |
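A second usage sketch for the Tarjan implementation above, assuming the de-obfuscated names tarjan and create_graph that the __main__ block already uses: two 2-cycles joined by a single edge. Components are emitted sinks-first, i.e. in reverse topological order of the condensation graph.

g = create_graph(4, [(0, 1), (1, 0), (1, 2), (2, 3), (3, 2)])
assert tarjan(g) == [[3, 2], [1, 0]]  # the sink component {2, 3} is popped first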
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase_( snake_case__: List[str] , snake_case__: List[Any]=7 ) -> Union[str, Any]:
UpperCAmelCase__ = None
if token is not None:
UpperCAmelCase__ = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
# The id of a workflow (not of a workflow run)
UpperCAmelCase__ = '636036'
UpperCAmelCase__ = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
UpperCAmelCase__ = requests.get(snake_case__ , headers=snake_case__ ).json()
return result["workflow_runs"]
def UpperCamelCase_( snake_case__: Union[str, Any] ) -> Any:
UpperCAmelCase__ = get_daily_ci_runs(snake_case__ )
UpperCAmelCase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
UpperCAmelCase__ = workflow_run['id']
break
return workflow_run_id
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: str , snake_case__: List[str] ) -> Optional[Any]:
UpperCAmelCase__ = get_last_daily_ci_runs(snake_case__ )
if workflow_run_id is not None:
UpperCAmelCase__ = get_artifacts_links(worflow_run_id=snake_case__ , token=snake_case__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
UpperCAmelCase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=snake_case__ , artifact_url=snake_case__ , output_dir=snake_case__ , token=snake_case__ )
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: int , snake_case__: Optional[int] ) -> List[Any]:
get_last_daily_ci_artifacts(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ = {}
for artifact_name in artifact_names:
UpperCAmelCase__ = os.path.join(snake_case__ , f"{artifact_name}.zip" )
if os.path.isfile(snake_case__ ):
UpperCAmelCase__ = {}
with zipfile.ZipFile(snake_case__ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case__ ):
# read the file
with z.open(snake_case__ ) as f:
UpperCAmelCase__ = f.read().decode('UTF-8' )
return results
| 335 |
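The final loop above is the generic read-every-file-out-of-an-artifact-zip pattern. Isolated, with a hypothetical helper name, it looks like the sketch below; note the original tests os.path.isdir on the member name, which consults the local filesystem, while the zip-native check is the trailing-slash test used here.

import zipfile

def read_artifact(zip_path: str) -> dict:
    """Return {member name: decoded text} for every regular file in the zip."""
    contents = {}
    with zipfile.ZipFile(zip_path) as z:
        for filename in z.namelist():
            if not filename.endswith("/"):  # zip directory entries end with '/'
                with z.open(filename) as f:
                    contents[filename] = f.read().decode("UTF-8")
    return contents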
from ...configuration_utils import PretrainedConfig
_UpperCamelCase = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """tapas"""
def __init__(self , __a=30522 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1024 , __a=[3, 256, 256, 2, 256, 256, 10] , __a=0.02 , __a=1E-1_2 , __a=0 , __a=10.0 , __a=0 , __a=1.0 , __a=None , __a=1.0 , __a=False , __a=None , __a=1.0 , __a=1.0 , __a=False , __a=False , __a="ratio" , __a=None , __a=None , __a=64 , __a=32 , __a=False , __a=True , __a=False , __a=False , __a=True , __a=False , __a=None , __a=None , **__a , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__a , **__a )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_sizes
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase__ = positive_label_weight
UpperCAmelCase__ = num_aggregation_labels
UpperCAmelCase__ = aggregation_loss_weight
UpperCAmelCase__ = use_answer_as_supervision
UpperCAmelCase__ = answer_loss_importance
UpperCAmelCase__ = use_normalized_answer_loss
UpperCAmelCase__ = huber_loss_delta
UpperCAmelCase__ = temperature
UpperCAmelCase__ = aggregation_temperature
UpperCAmelCase__ = use_gumbel_for_cells
UpperCAmelCase__ = use_gumbel_for_aggregation
UpperCAmelCase__ = average_approximation_function
UpperCAmelCase__ = cell_selection_preference
UpperCAmelCase__ = answer_loss_cutoff
UpperCAmelCase__ = max_num_rows
UpperCAmelCase__ = max_num_columns
UpperCAmelCase__ = average_logits_per_cell
UpperCAmelCase__ = select_one_column
UpperCAmelCase__ = allow_empty_column_selection
UpperCAmelCase__ = init_cell_selection_weights_to_zero
UpperCAmelCase__ = reset_position_index_per_cell
UpperCAmelCase__ = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase__ = aggregation_labels
UpperCAmelCase__ = no_aggregation_label_index
if isinstance(self.aggregation_labels , __a ):
UpperCAmelCase__ = {int(__a ): v for k, v in aggregation_labels.items()}
| 335 | 1 |
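The closing isinstance branch above exists because JSON object keys are always strings: a config serialised to disk round-trips integer aggregation ids back as strings, so they are coerced to int on load. A small illustration, using what is (from memory, so treat it as an assumption) the usual WTQ aggregation label set:

aggregation_labels = {"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"}  # as read back from JSON
if isinstance(aggregation_labels, dict):
    aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
print(aggregation_labels)  # {0: 'NONE', 1: 'SUM', 2: 'AVERAGE', 3: 'COUNT'}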
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCamelCase = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''DPTFeatureExtractor''']
_UpperCamelCase = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 |
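Both this module and the next use _LazyModule to defer heavy imports until an attribute is first touched. A minimal sketch of the underlying mechanism via module-level __getattr__ (PEP 562); the real _LazyModule additionally handles submodules, __dir__, and pickling.

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # module name -> exported symbols

def __getattr__(name):  # only called for attributes not found normally (PEP 562)
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")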
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
import math
def UpperCamelCase_( snake_case__: float , snake_case__: float ) -> float:
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(snake_case__ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 335 |
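A quick numerical check of Malus's law, I = I0 * cos^2(theta), assuming the function's original name malus_law referenced in the doctest call above: at 60 degrees only a quarter of the incident intensity passes the analyser.

print(malus_law(100.0, 60))  # ~25.0, since cos(60°) = 0.5 and 0.5 ** 2 = 0.25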
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase__ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase__ = model_name.find('patch' )
UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
UpperCAmelCase__ = 12
UpperCAmelCase__ = 10_24
UpperCAmelCase__ = 40_96
UpperCAmelCase__ = 16
UpperCAmelCase__ = 24
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = 3_36
UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
return config
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCAmelCase__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCAmelCase__ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase__ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(snake_case__ )
if "attn.in_proj" in key:
UpperCAmelCase__ = key.split('.' )
if key.startswith('visual' ):
UpperCAmelCase__ = key_split[3]
UpperCAmelCase__ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[
:dim
]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = rename_key(snake_case__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase__ = val.T
UpperCAmelCase__ = val
return orig_state_dict
def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]:
if num_frames == 8:
UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCAmelCase__ = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy'
UpperCAmelCase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , )
UpperCAmelCase__ = np.load(snake_case__ )
return list(snake_case__ )
def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]:
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCAmelCase__ = model_to_url[model_name]
UpperCAmelCase__ = 8
if "16-frames" in model_name:
UpperCAmelCase__ = 16
elif "shot" in model_name:
UpperCAmelCase__ = 32
UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase__ = 'pytorch_model.bin'
gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model']
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase__ = prepare_video(snake_case__ )
UpperCAmelCase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ = model(**snake_case__ )
# Verify outputs
UpperCAmelCase__ = outputs.logits_per_video
UpperCAmelCase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , snake_case__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(snake_case__ , organization='nielsr' )
processor.push_to_hub(snake_case__ , organization='nielsr' )
slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 335 | 1 |
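Most of convert_state_dict above is the standard CLIP-checkpoint chore of cutting a fused attention projection into separate q/k/v matrices. The core slice, in isolation with a toy dimension:

import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)  # fused (q; k; v) rows, CLIP layout

q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)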
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase_( snake_case__: Optional[int] , snake_case__: List[Any] , snake_case__: Union[str, Any] ) -> Tuple:
UpperCAmelCase__ = OmegaConf.load(snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
UpperCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'first_stage_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(snake_case__ ):
UpperCAmelCase__ = state_dict[key]
UpperCAmelCase__ = config.model.params.first_stage_config.params
UpperCAmelCase__ = config.model.params.unet_config.params
UpperCAmelCase__ = VQModel(**snake_case__ ).eval()
vqvae.load_state_dict(snake_case__ )
UpperCAmelCase__ = UNetLDMModel(**snake_case__ ).eval()
unet.load_state_dict(snake_case__ )
UpperCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , )
UpperCAmelCase__ = LDMPipeline(snake_case__ , snake_case__ , snake_case__ )
pipeline.save_pretrained(snake_case__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
_UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 335 | 1 |
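The two extraction loops above gather keys by prefix; the obfuscated assignment hides the key used on the left-hand side, but before load_state_dict can succeed on a sub-model the prefix normally has to be stripped from each key. A hedged sketch of the usual helper:

def extract_sub_state_dict(state_dict: dict, prefix: str) -> dict:
    """Keep only the keys under `prefix`, with the prefix removed."""
    return {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}

# e.g. extract_sub_state_dict(state_dict, "first_stage_model.") for the VQ-VAE weights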
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> int:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_choices
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = FlaxRoFormerModelTester(self )
@slow
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=__a )
UpperCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
@require_flax
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
UpperCAmelCase__ = jnp.array([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = 50000
UpperCAmelCase__ = (1, 6, vocab_size)
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 335 |
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase = Lock()
def UpperCamelCase_( snake_case__: Optional[Any] , snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Tuple , snake_case__: Dict , snake_case__: Any ) -> str:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
UpperCAmelCase__ = []
UpperCAmelCase__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
UpperCAmelCase__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCamelCase_( ) -> Dict:
UpperCAmelCase__ = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
UpperCAmelCase__ = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main()
| 335 |
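For contrast with the multiprocessing pipeline above, the single-process form of the same algorithm: plain odd-even transposition sort, which is guaranteed to sort n elements within n alternating phases.

def odd_even_sort(arr):
    arr = list(arr)
    n = len(arr)
    for round_ in range(n):
        start = round_ % 2  # even rounds compare (0,1),(2,3),...; odd rounds (1,2),(3,4),...
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_sort([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]) == list(range(1, 11))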
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """sew-d"""
def __init__(self , __a=32 , __a=768 , __a=12 , __a=12 , __a=3072 , __a=2 , __a=512 , __a=256 , __a=True , __a=True , __a=("p2c", "c2p") , __a="layer_norm" , __a="gelu_python" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.02 , __a=1E-7 , __a=1E-5 , __a="group" , __a="gelu" , __a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a=False , __a=128 , __a=16 , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="mean" , __a=False , __a=False , __a=256 , __a=0 , __a=1 , __a=2 , **__a , ) -> str:
"""simple docstring"""
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = feat_extract_norm
UpperCAmelCase__ = feat_extract_activation
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = conv_bias
UpperCAmelCase__ = num_conv_pos_embeddings
UpperCAmelCase__ = num_conv_pos_embedding_groups
UpperCAmelCase__ = len(self.conv_dim )
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = squeeze_factor
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = position_buckets
UpperCAmelCase__ = share_att_key
UpperCAmelCase__ = relative_attention
UpperCAmelCase__ = norm_rel_ebd
UpperCAmelCase__ = list(__a )
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = feat_proj_dropout
UpperCAmelCase__ = final_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = feature_layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = apply_spec_augment
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ = ctc_loss_reduction
UpperCAmelCase__ = ctc_zero_infinity
# sequence classification
UpperCAmelCase__ = use_weighted_layer_sum
UpperCAmelCase__ = classifier_proj_size
@property
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 335 | 1 |
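The closing property computes the model's overall audio downsampling factor as the product of the convolutional strides; with the default strides above it comes out to 320 input samples per output frame.

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320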