from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None,
        features: Optional[Features] = None, cache_dir: Optional[str] = None, keep_in_memory: bool = False,
        streaming: bool = False, num_proc: Optional[int] = None, **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # data_files must always be a {split: paths} mapping for the builder
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        # Build an iterable dataset directly from the raw files
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
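# Illustrative usage sketch (not from the original module; the file name is
# hypothetical). Reading plain text into a map-style or a streaming dataset:
#
#     from datasets.io.text import TextDatasetReader
#     dataset = TextDatasetReader("my_corpus.txt", keep_in_memory=True).read()
#     streamed = TextDatasetReader("my_corpus.txt", streaming=True).read()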
import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Pretokenized inputs do not mix well with a byte-level BPE, so this check is skipped here
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: the tokenizer has no padding token, so padding must raise
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                input_ids = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(input_ids))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(input_ids)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
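# Note on the fixture above: "\u0120" is the printable stand-in that GPT-2's
# byte-level BPE uses for a leading space byte. A minimal sketch of what the toy
# vocab does, assuming the files written in setUp:
#
#     tok = GPT2Tokenizer(vocab_file, merges_file, unk_token="<unk>")
#     tok.tokenize("lower newer", add_prefix_space=True)
#     # -> ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]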
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
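# Illustrative CLI sketch (the answers shown are hypothetical): running the
# command walks through the prompts defined above and reports where the file
# landed.
#
#     $ accelerate config --config_file my_config.yaml
#     In which compute environment are you running?
#     > This machine
#     ...
#     accelerate configuration saved at my_config.yaml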
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to the matching PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
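# Illustrative inference sketch (the checkpoint name is taken from the docstring
# constants above; using AutoImageProcessor and an in-scope `image` variable are
# assumptions of this sketch):
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     predicted = model(**inputs).logits.argmax(-1)  # e.g. "tabby, tabby cat"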
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
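# Illustrative sketch: instantiating the config with its defaults and inspecting
# the ONNX input axes declared above.
#
#     config = DeiTConfig()
#     onnx_config = DeiTOnnxConfig(config)
#     onnx_config.inputs
#     # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])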
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
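# Illustrative trace of the function above: for "abbbaba" the augmented string is
# "a|b|b|b|a|b|a"; the widest expansion is around index 4 (length 9 including the
# separators), and stripping "|" gives back "abbba". Reusing the mirrored lengths
# inside the current [l, r] window is what keeps the whole scan O(n).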
import argparse
import shlex

import runhouse as rh

if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
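# Worked example for the 2x2 branch above: for [[2, 5], [2, 0]] the determinant is
# 2*0 - 2*5 = -10, and the swapped/negated matrix is [[0, -5], [-2, 2]], so
#
#     >>> inverse_of_matrix([[2, 5], [2, 0]])
#     [[0.0, 0.5], [0.2, -0.2]]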
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        # Decode the char, BPE and wordpiece head logits, then keep, per sample,
        # the prediction with the highest confidence score.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        return [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        return [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
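# Illustrative usage sketch (the checkpoint id, the model class, and the in-scope
# `image` variable are assumptions of this sketch): the processor picks, per
# image, whichever of the char/BPE/wordpiece heads decodes with the highest
# cumulative confidence.
#
#     from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     processor.batch_decode(outputs.logits)["generated_text"]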
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
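# Note on the shapes exercised above: post_process_masks upsamples low-resolution
# mask logits (here 5x5) back to the original image size, so a (1, 3, 5, 5) mask
# with original_sizes=[[1764, 2646]] and reshaped_input_sizes=[[683, 1024]] comes
# back as (1, 3, 1764, 2646), regardless of the tensor framework used.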
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
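# Example invocation (model name and data directory are hypothetical, shown
# only for illustration). `fire` maps command-line arguments onto
# save_len_file's parameters:
#
#     python save_len_file.py facebook/bart-base ./cnn_dm --consider_target=True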
| 195 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of `number` (0 for 0).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
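# Illustrative note (not in the original module): with _LazyModule installed
# in sys.modules, `from transformers.models.swinv2 import Swinv2Config` only
# executes configuration_swinv2.py at first attribute access, which keeps the
# top-level `import transformers` cheap.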
| 370 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
__lowercase = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 160 | 0 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b using only additions, doublings and bit shifts."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a by b modulo `modulus`, reducing after every addition."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
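# A minimal sanity check (the names `binary_multiply` and `binary_mod_multiply`
# were reconstructed above for readability): both helpers should agree with
# the built-in `*` and `%` operators.
if __name__ == "__main__":
    assert binary_multiply(3, 9) == 27
    assert binary_mod_multiply(3, 9, 5) == 27 % 5
    print("binary multiplication checks passed")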
| 11 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
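# Hedged usage sketch (the argument values below are invented for
# illustration): the subclass is a drop-in replacement for TrainingArguments
# when running under SageMaker, and `.device` resolves via _setup_devices.
#
#     args = SageMakerTrainingArguments(output_dir="/opt/ml/model", per_device_train_batch_size=8)
#     print(args.device)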
| 19 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
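# Example invocation (illustrative only; the script name and file paths are
# assumptions, not taken from this file):
#
#     python convert_unispeech_checkpoint.py --checkpoint_path ./unispeech.pt \
#         --pytorch_dump_folder_path ./unispeech-hf --dict_path ./dict.ltr.txt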
| 357 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 256 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
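# Illustrative note (the test-file path below is an assumption): a single test
# from this module can be run with pytest's -k filter, e.g.
#
#     python -m pytest tests/trainer/test_trainer_callback.py -k test_event_flow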
| 84 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 103 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
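    # Example invocation (illustrative only; the script filename and output
    # path are assumptions, not taken from this source):
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub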
| 37 |
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    # Lucas-Lehmer test: decides whether the Mersenne number 2**p - 1 is prime.
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
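    # Added sanity checks (illustrative): 2**7 - 1 = 127 is prime, while
    # 2**11 - 1 = 2047 = 23 * 89 is not.
    assert lucas_lehmer_test(7) is True
    assert lucas_lehmer_test(11) is False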
| 37 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    # Decorator: on accelerate >= 0.17.0, run an attached hook's `pre_forward`
    # before the wrapped method; otherwise return the method unchanged.
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
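# Usage sketch (illustrative only; `TinyCodec` and `encode` are hypothetical
# names, not part of the module above): decorating an instance method makes an
# attached accelerate hook fire before the method body runs.
#
#     class TinyCodec:
#         @apply_forward_hook
#         def encode(self, x):
#             return x * 0.5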
| 257 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    """simple docstring"""

    def __init__(self, path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None, split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    """simple docstring"""

    def __init__(self, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
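# Minimal illustrative subclass (hypothetical, for demonstration only):
# concrete readers store source-specific state in __init__ and implement
# read() to return a Dataset.
#
#     class InMemoryListReader(AbstractDatasetInputStream):
#         def __init__(self, rows, **kwargs):
#             super().__init__(**kwargs)
#             self.rows = rows
#
#         def read(self):
#             return Dataset.from_list(self.rows)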
| 114 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 361 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 0 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
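# Added note on the pmap data layout: `replicate` copies the params to every
# device, while `shard` splits the leading batch axis across devices, which is
# why the output arrays above carry jax.device_count() as their first dimension.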
| 105 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
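# Illustrative trace (added note): for key "encoder.layers.0.conv" and
# weight_type "bias", the loop above walks model.encoder -> .layers[0] -> .conv
# and then copies `value` into that module's .bias.data.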
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
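# Behaviour examples (added for illustration):
#   should_ignore("encoder.proj.weight", ["encoder.proj"])            -> True  (substring match)
#   should_ignore("encoder.layers.0.norm_k.bias",
#                 ["encoder.layers.*.norm_k.bias"])                   -> True  (wildcard match)
#   should_ignore("decoder.layers.0.conv.weight", ["encoder.proj"])   -> False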
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
| 160 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 234 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
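    # Example invocation (illustrative only; the script filename and the paths
    # below are assumed placeholders):
    #   python convert_speecht5_checkpoint.py --task t2s \
    #       --checkpoint_path ./speecht5_tts.pt --vocab_path ./spm_char.model \
    #       --pytorch_dump_folder_path ./speecht5_tts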
| 234 | 1 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
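    # Illustrative cross-check (added): a length-3 row can be filled as
    # 1+1+1, 1+2, 2+1, or a single length-3 block, so solution(3) == 4.
    assert solution(3) == 4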
| 67 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        raise NotImplementedError()

    def end(self):
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
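# Illustrative usage (added; "gpt2" is just an example checkpoint id):
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     streamer = TextIteratorStreamer(tok)
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=10))
#     thread.start()
#     generated_text = ""
#     for new_text in streamer:
#         generated_text += new_text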
| 256 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
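    # Example invocation (illustrative only; the script filename and output
    # path are assumed placeholders):
    #   python convert_vit_dino_to_pytorch.py --model_name dino_vitb16 \
    #       --pytorch_dump_folder_path ./dino_vitb16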
| 226 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    '''simple docstring'''
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
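# A small sketch of how these (src, dest) pairs are consumed (the state dict
# below is illustrative, not from the source): each pair moves one tensor key
# in the flat checkpoint dictionary.
#
#   state = {"backbone.downsample_layers.0.0.weight": torch.ones(3)}
#   for src, dest in create_rename_keys(config):
#       if src in state:
#           state[dest] = state.pop(src)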
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["state_dict"]
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
a : List[str] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
a : int = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
a : Union[str, Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
a : Union[str, Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
a : str = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 )
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(F'''openmmlab/{model_name}''' )
        processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 226 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
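# Sanity sketch (illustrative): every special codepoint except PAD sits in
# Unicode's Private Use Area (U+E000..U+F8FF), so chr() on it can never
# collide with a character the Unicode Consortium will assign.
# assert all(0xE000 <= cp <= 0xF8FF for cp in SPECIAL_CODEPOINTS if cp != PAD)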
class lowerCAmelCase_( PreTrainedTokenizer ):
    '''simple docstring'''
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ) -> int:
        return self._unicode_vocab_size

    def _tokenize( self ,text: str ) -> List[str]:
        # CANINE tokenizes a string into its individual characters (codepoints).
        return list(text )

    def _convert_token_to_id( self ,token: str ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F"""invalid token: '{token}'""" )

    def _convert_id_to_token( self ,index: int ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F"""invalid id: {index}""" )

    def convert_tokens_to_string( self ,tokens: List[str] ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_b is not None:
            result += token_ids_b + sep
        return result

    def get_special_tokens_mask(
        self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ,already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a ,token_ids_1=token_ids_b ,already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            result += ([0] * len(token_ids_b )) + [1]
        return result

    def create_token_type_ids_from_sequences( self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
        if token_ids_b is not None:
            result += len(token_ids_b + sep ) * [1]
        return result

    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        # CANINE has no vocabulary file to write: its vocabulary is the Unicode codepoint space.
        return ()
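# A minimal usage sketch (the inputs are illustrative, not from the source):
# tokenizer = lowerCAmelCase_()
# ids = tokenizer.build_inputs_with_special_tokens([ord(c) for c in "hi"])
# ids == [CLS, 104, 105, SEP]  # i.e. [0xE000, ord("h"), ord("i"), 0xE001]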
| 37 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class lowerCAmelCase_( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token ,lstrip=True ,rstrip=False ,normalized=False )
            if isinstance(mask_token ,str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )

    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self ,d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def preprocess_text( self ,inputs ):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' ,'"' ).replace("''" ,'"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" ,outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize( self ,text: str ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text ,out_type=str )
        new_pieces = []
        for piece in pieces:
            # Re-split pieces like "9," so that the digit and the comma end up separated.
            if len(piece ) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,"" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces

    def _convert_token_to_id( self ,token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self ,index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self ,tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def build_inputs_with_special_tokens( self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def get_special_tokens_mask(
        self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ,already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a ,token_ids_1=token_ids_b ,already_has_special_tokens=True )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]

    def create_token_type_ids_from_sequences( self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,"""wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 37 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :int = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Union[str, Any] = ["LayoutLMv2FeatureExtractor"]
__UpperCAmelCase :str = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
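# Behavior sketch: after this swap, `import transformers.models.layoutlmv2`
# stays cheap; the heavy torch/tokenizers submodules are only imported when an
# attribute listed in _import_structure (e.g. LayoutLMv2Model) is first accessed.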
'''simple docstring'''
def solution( n: int = 600851475143 ):
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    max_prime_factor = 1
    i = 2
    # Trial division: repeatedly divide out each factor i; the last divisor
    # recorded (or the remaining n, if it exceeds 1) is the largest prime factor.
    while i * i <= n:
        while n % i == 0:
            max_prime_factor = i
            n //= i
        i += 1
    if n > 1:
        max_prime_factor = n
    return int(max_prime_factor )
if __name__ == "__main__":
print(f"""{solution() = }""") | 240 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__snake_case = KandinskyVaaPriorPipeline
__snake_case = ['''prompt''']
__snake_case = ['''prompt''', '''negative_prompt''']
__snake_case = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
return 100
@property
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
a = PriorTransformer(**__UpperCAmelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a = CLIPVisionModelWithProjection(__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self.dummy_prior
a = self.dummy_image_encoder
a = self.dummy_text_encoder
a = self.dummy_tokenizer
a = self.dummy_image_processor
a = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=__UpperCAmelCase , clip_sample_range=10.0 , )
a = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str=0 ) ->int:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.image_embeds
a = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
a = image[0, -10:]
a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
a = False
self._test_inference_batch_single_identical(
test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
@skip_mps
def __lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
a = torch_device == '''cpu'''
a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack( value: list[int] , weight: list[int] , capacity: int ):
    # Greedy strategy: sort item indices by value-to-weight ratio, take whole
    # items while they fit, then a fraction of the first item that does not.
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
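    # Worked example (values illustrative, not from the source): with
    # value=[60, 100, 120], weight=[10, 20, 30] and capacity=50, the ratio
    # order (6, 5, 4) takes items 0 and 1 whole plus 2/3 of item 2 -> 240.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value, fractions)  # 240.0 [1, 1, 0.6666666666666666]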
| 241 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''roberta'''

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
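# A usage sketch (hypothetical, for illustration): the ONNX export consumes
# exactly the two dynamic-axis inputs declared above.
# config = RobertaConfig()
# onnx_config = RobertaOnnxConfig(config)
# assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]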
| 241 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    # Rising product u * (u - 1) * ... * (u - p + 1) used by Newton's
    # forward-difference interpolation formula.
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F"""the value at {value} is {summ}""")
if __name__ == "__main__":
main()
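# The quantity printed above evaluates Newton's forward-difference formula
# f(x) ~ y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + ..., with u = (x - x0)/h.
# Worked check (inputs illustrative): for x=[0, 1, 2, 3], y=[1, 2, 4, 8] the
# differences are Δ=[1, 2, 4], Δ²=[1, 2], Δ³=[1], and interpolating at 1.5
# gives 1 + 1.5*1 + (1.5*0.5/2)*1 + (1.5*0.5*(-0.5)/6)*1 = 2.8125.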
| 234 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 234 | 1 |
'''simple docstring'''
def join( separator: str , separated: list ) -> str:
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('join() accepts only strings to be joined' )
        joined += word_or_phrase + separator
    # Strip the trailing separator added by the final loop iteration.
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
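    # A tiny demo (inputs illustrative, not from the source):
    print(join("-", ["a", "b", "c"]))  # a-b-c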
| 107 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__lowerCAmelCase = pytest.mark.integration
__lowerCAmelCase = {'''comet'''}
__lowerCAmelCase = importlib.util.find_spec('''fairseq''') is not None
__lowerCAmelCase = {'''code_eval'''}
__lowerCAmelCase = os.name == '''nt'''
__lowerCAmelCase = {'''bertscore''', '''frugalscore''', '''perplexity'''}
__lowerCAmelCase = importlib.util.find_spec('''transformers''') is not None
def __lowerCamelCase ( lowerCAmelCase_ ) -> Any:
@wraps(lowerCAmelCase_ )
def wrapper(self , lowerCAmelCase_ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self , lowerCAmelCase_ )
return wrapper
def __lowerCamelCase ( lowerCAmelCase_ ) -> Union[str, Any]:
@wraps(lowerCAmelCase_ )
def wrapper(self , lowerCAmelCase_ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self , lowerCAmelCase_ )
return wrapper
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
@wraps(lowerCAmelCase_ )
def wrapper(self , lowerCAmelCase_ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self , lowerCAmelCase_ )
return wrapper
def __lowerCamelCase ( ) -> Tuple:
_a : Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@local
class __magic_name__ ( parameterized.TestCase ):
lowerCAmelCase : List[str] = {}
lowerCAmelCase : Optional[int] = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def __lowercase ( self : Dict ,_UpperCAmelCase : Optional[Any] ):
        doctest.ELLIPSIS_MARKER = '[...]'
_a : Dict = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' ,_UpperCAmelCase ) ).module_path )
_a : Optional[int] = datasets.load.import_main_class(metric_module.__name__ ,dataset=_UpperCAmelCase )
# check parameters
_a : Optional[int] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(_UpperCAmelCase ,metric_module.__name__ ):
with self.use_local_metrics():
try:
_a : Optional[int] = doctest.testmod(_UpperCAmelCase ,verbose=_UpperCAmelCase ,raise_on_error=_UpperCAmelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed ,0 )
self.assertGreater(results.attempted ,1 )
@slow
def __lowercase ( self : Tuple ,_UpperCAmelCase : Dict ):
        doctest.ELLIPSIS_MARKER = '[...]'
_a : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' ,_UpperCAmelCase ) ).module_path )
# run doctest
with self.use_local_metrics():
_a : int = doctest.testmod(_UpperCAmelCase ,verbose=_UpperCAmelCase ,raise_on_error=_UpperCAmelCase )
self.assertEqual(results.failed ,0 )
self.assertGreater(results.attempted ,1 )
@contextmanager
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](_UpperCAmelCase ):
yield
else:
yield
@contextmanager
def __lowercase ( self : Optional[int] ):
def load_local_metric(_UpperCAmelCase : Tuple ,*_UpperCAmelCase : Dict ,**_UpperCAmelCase : Tuple ):
return load_metric(os.path.join('metrics' ,_UpperCAmelCase ) ,*_UpperCAmelCase ,**_UpperCAmelCase )
with patch('datasets.load_metric' ) as mock_load_metric:
_a : Any = load_local_metric
yield
@classmethod
def __lowercase ( cls : str ,_UpperCAmelCase : List[str] ):
def wrapper(_UpperCAmelCase : int ):
_a : Optional[Any] = contextmanager(_UpperCAmelCase )
_a : Optional[int] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[str]:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags
class __magic_name__ ( _UpperCamelCase ):
def __lowercase ( self : int ,_UpperCAmelCase : Union[str, Any] ):
assert len(input_dict['input_ids'] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
_a : int = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def __lowerCamelCase ( lowerCAmelCase_ ) -> Union[str, Any]:
import torch
def bert_cos_score_idf(lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCAmelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
_a : Any = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def __lowerCamelCase ( lowerCAmelCase_ ) -> Dict:
def load_from_checkpoint(lowerCAmelCase_ ):
class __magic_name__ :
def __lowercase ( self : str ,_UpperCAmelCase : Dict ,*_UpperCAmelCase : int ,**_UpperCAmelCase : str ):
assert len(_UpperCAmelCase ) == 2
_a : Dict = [0.19, 0.92]
return scores, sum(_UpperCAmelCase ) / len(_UpperCAmelCase )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch('comet.download_model' ) as mock_download_model:
_a : Any = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
_a : Optional[Any] = load_from_checkpoint
yield
def __lowerCamelCase ( ) -> Tuple:
_a : Dict = load_metric(os.path.join('metrics' , 'seqeval' ) )
_a : Optional[int] = 'ERROR'
_a : Optional[Any] = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(lowerCAmelCase_ , match=re.escape(lowerCAmelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=lowerCAmelCase_ )
| 107 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_0_9_6,
"google/bigbird-roberta-large": 4_0_9_6,
"google/bigbird-base-trivia-itc": 4_0_9_6,
}
__A ="▁"
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]

    def create_token_type_ids_from_sequences( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
| 226 |
import numpy as np
class Cell:
    """A cell in the world: position, parent link and the A* scores g, h, f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """The external world: a world_size[0] x world_size[1] numpy grid."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of cell (8-connected)."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """Best-first search from start to goal; returns the path as positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip neighbours that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Skip neighbours already queued with a strictly better score.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
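# Note on the heuristic: (y2 - y1)**2 + (x2 - x1)**2 is the *squared*
# Euclidean distance, which over-estimates the true remaining cost under unit
# step costs, so the search behaves like weighted A* rather than strictly
# admissible A*. Admissible alternatives (sketch): math.sqrt of the same sum,
# or the Chebyshev distance max(abs(x2 - x1), abs(y2 - y1)) on 8-connected grids.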
| 226 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
from manim import *
class _lowerCamelCase ( Scene ):
    """simple docstring"""
    def construct( self ):
'''simple docstring'''
A_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
A_ : Any = [mem.copy() for i in range(6 )]
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : str = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Optional[Any] = Text('''CPU''' , font_size=24 )
A_ : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = [mem.copy() for i in range(1 )]
A_ : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Dict = Text('''GPU''' , font_size=24 )
A_ : List[str] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.align_to(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = Text('''Model''' , font_size=24 )
A_ : List[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , )
A_ : int = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
A_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Any = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(_SCREAMING_SNAKE_CASE ) , Write(_SCREAMING_SNAKE_CASE ) )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Dict = []
A_ : int = []
A_ : Optional[Any] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
cpu_target.move_to(_SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
A_ : Union[str, Any] = 0.4_6 / 4
A_ : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
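# Rendering sketch (not part of the original scene): assuming this file is saved
# as `stage_1.py`, the scene above can be previewed with the manim CLI:
#     manim -pql stage_1.py _lowerCamelCase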
| 65 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
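# Usage sketch: example scripts call this at import time to fail fast on stale
# installs. The version strings below are illustrative only.
#     check_min_version("4.21.0")       # no-op when the installed version is new enough
#     check_min_version("4.99.0.dev0")  # raises ImportError unless running a source install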
| 240 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class snake_case_(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        do_normalize: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
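# Minimal usage sketch for the processor defined above (the zero image is a
# placeholder input; `snake_case_` is the class name used in this file):
#     import numpy as np
#     processor = snake_case_()
#     batch = processor(np.zeros((512, 384, 3), dtype=np.uint8), return_tensors="np")
#     # resize to 224x224 followed by a 224x224 center crop -> (1, 3, 224, 224)
#     print(batch["pixel_values"].shape)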
| 240 | 1 |
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if num1 and num2 have opposite signs, False otherwise.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
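# Note (added for clarity, not in the original snippet): for two's-complement
# style integers, `num1 ^ num2` is negative exactly when the sign bits differ,
# e.g. different_signs(1, -1) -> True, different_signs(-3, -5) -> False.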
| 352 | '''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Reads the visual-genome class and attribute label files."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    """Loads a pickled checkpoint and converts numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """Compares `in_tensor` against the tensor previously saved to dump.pt."""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()


def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
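# End-to-end usage sketch (the checkpoint name and image path below are
# placeholders; `Config.from_pretrained` resolves a `config.yaml` via the legacy
# S3/CDN layout this research project relies on, which may no longer be served):
#     config = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
#     img = img_tensorize("path/or/url/to/image.jpg")
#     for batch in chunk([img], batch=1):
#         print(len(batch))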
| 106 | 0 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Builds the train/validation/test `DataLoader`s for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
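# Launch sketch (assumes this script is saved as `cross_validation.py`; the flags
# are the ones defined in `main` above):
#     accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16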
| 241 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
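# Behavior sketch: these placeholders let `import` succeed without flax; any
# attempt to actually use one raises at call time, e.g.
#     obj = __lowerCamelCase()  # ImportError from requires_backends: flax is required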
| 241 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
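# Worked Luhn example (illustrative): for "4111111111111111", doubling every
# second digit from the right turns the leading 4 into 8 and seven of the 1s
# into 2s; the digit total is 8 + 7 * 2 + 8 * 1 = 30, and 30 % 10 == 0, so the
# number passes luhn_validation.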
| 139 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # Attention probabilities from the block-sparse attention differ between the
        # Flax and PyTorch implementations, so skip comparing them.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 139 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : List[str] = """CLIPImageProcessor"""
SCREAMING_SNAKE_CASE_ : Tuple = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Optional[int] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Any=None , **__lowerCamelCase : Any ) -> str:
a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
a = kwargs.pop("feature_extractor" )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : int , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=None , **__lowerCamelCase : str ) -> Any:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
a = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if images is not None:
a = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None and images is not None:
a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ) -> List[str]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Any ) -> str:
a = self.tokenizer.model_input_names
a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
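# Hedged usage sketch (added for illustration; not from the original file): how a
# processor pairing a tokenizer with an image processor is typically called. The
# checkpoint name is an example and loading it requires network access.
from PIL import Image
from transformers import CLIPProcessor
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
clip_inputs = clip_processor(
    text=["a photo of a cat"],
    images=Image.new("RGB", (224, 224)),
    return_tensors="pt",
    padding=True,
)
# clip_inputs now carries input_ids, attention_mask and pixel_values together.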
| 107 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCAmelCase : List[Any] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( A : Dict, A : Union[str, Any], A : Optional[int]=None ):
'''simple docstring'''
if rng is None:
a = random.Random()
a = 1
for dim in shape:
total_dims *= dim
a = []
for _ in range(A ):
values.append(rng.randint(0, vocab_size - 1 ) )
    a = np.array(A, dtype=jnp.int32 ).reshape(A )
return output
def __magic_name__ ( A : Dict, A : Union[str, Any]=None ):
'''simple docstring'''
a = ids_tensor(A, vocab_size=2, rng=A )
# make sure that at least one token is attended to for each batch
a = 1
return attn_mask
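# Cleaned-up sketch (added; an interpretation of the name-mangled helpers above,
# mirroring the usual `ids_tensor` test utility): build a random integer array of
# the requested shape, drawing token ids uniformly from [0, vocab_size).
import random
import numpy as np
def ids_tensor_sketch(shape, vocab_size, rng=None):
    rng = rng or random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = [rng.randint(0, vocab_size - 1) for _ in range(total_dims)]
    return np.array(values, dtype=np.int32).reshape(shape)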
@require_flax
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Any = ()
def __UpperCAmelCase ( self : int ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
a = 2
a = inputs["input_ids"].shape[-1] // 2
a = inputs["input_ids"][:max_batch_size, :sequence_length]
a = jnp.ones_like(__lowerCamelCase )
a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 0
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model_class.__name__[4:] # Skip the "Flax" at the beginning
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = pt_model_class(__lowerCamelCase ).eval()
a = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
a = flax_model.generate(__lowerCamelCase ).sequences
a = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
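            # the two frameworks may stop at different lengths (e.g. at EOS), so trim the longer Flax output before comparing token ids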
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : int ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
a = 0.8
a = 10
a = 0.3
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 2
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = 2
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
a = "Hello world"
a = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ):
model.generate(__lowerCamelCase , do_samples=__lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCamelCase , "foo" ):
a = {"foo": "bar"}
model.generate(__lowerCamelCase , **__lowerCamelCase )
| 107 | 1 |
'''simple docstring'''
import math
import sys
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ''
try:
with open(_A , 'rb' ) as binary_file:
_lowerCAmelCase : Dict = binary_file.read()
for dat in data:
_lowerCAmelCase : Any = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = {'0': '0', '1': '1'}
    _lowerCAmelCase , _lowerCAmelCase = '', ''  # two accumulators: decoded result and current bit pattern
_lowerCAmelCase : Optional[Any] = len(_A )
for i in range(len(_A ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_lowerCAmelCase : Any = lexicon[curr_string]
result += last_match_id
_lowerCAmelCase : Dict = last_match_id + '0'
        if math.log2(index ).is_integer():
_lowerCAmelCase : str = {}
for curr_key in list(_A ):
_lowerCAmelCase : Any = lexicon.pop(_A )
_lowerCAmelCase : Tuple = new_lex
_lowerCAmelCase : List[str] = last_match_id + '1'
index += 1
_lowerCAmelCase : Dict = ''
return result
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 8
try:
with open(_A , 'wb' ) as opened_file:
_lowerCAmelCase : Tuple = [
to_write[i : i + byte_length]
for i in range(0 , len(_A ) , _A )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_A , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_lowerCAmelCase : Optional[Any] = data_bits[counter:]
_lowerCAmelCase : str = data_bits[counter + 1 :]
return data_bits
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = read_file_binary(_A )
_lowerCAmelCase : List[str] = remove_prefix(_A )
_lowerCAmelCase : str = decompress_data(_A )
write_file_binary(_A , _A )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
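# Cleaned-up sketch (added; an interpretation of the name-mangled decoder above, in the
# spirit of the Lempel-Ziv scheme it implements): grow the current bit pattern until it
# matches a lexicon key, emit the mapped string, and re-key the lexicon with a leading
# "0" whenever the running index reaches a power of two.
import math
def decompress_bits_sketch(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for bit in data_bits:
        curr_string += bit
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            lexicon = {"0" + key: value for key, value in lexicon.items()}
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result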
| 350 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_lowerCAmelCase : List[str] = ''
_lowerCAmelCase : Any = ''
    # append each character except the last, each followed by "|", to new_input_string
for i in input_string[: len(_A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending
    # palindromic substring
    _lowerCAmelCase , _lowerCAmelCase = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]
# for each character in new_string find corresponding palindromic string
_lowerCAmelCase : Any = 0
for j in range(len(_A ) ):
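        # seed k from the mirror of j around the current center: inside the known
        # palindrome spanning [l, r], position j mirrors to l + r - j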
_lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase : List[str] = 2 * k - 1
        # does this palindrome end after the previously explored right boundary r?
        # if yes, update r (and l) to cover this palindrome
if j + k - 1 > r:
_lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741
_lowerCAmelCase : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase : Dict = length[j]
_lowerCAmelCase : Optional[int] = j
# create that string
_lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
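# Illustrative reference check (added; not from the original file): a brute-force
# O(n^3) longest-palindromic-substring oracle against which the Manacher-style
# scan above can be validated, e.g. "abaab" -> "baab".
def longest_palindrome_bruteforce(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            candidate = s[i : j + 1]
            if candidate == candidate[::-1] and len(candidate) > len(best):
                best = candidate
    return best
assert longest_palindrome_bruteforce("abaab") == "baab"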
| 25 | 0 |
from __future__ import annotations
def lowerCAmelCase_ ( __A = 4 ) -> list[list[int]]:
'''simple docstring'''
UpperCAmelCase__ = abs(__A ) or 4
return [[1 + x + y * row_size for x in range(__A )] for y in range(__A )]
def lowerCAmelCase_ ( __A ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(__A ) )
# OR.. transpose(reverse_column(matrix))
def lowerCAmelCase_ ( __A ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(__A ) )
# OR.. reverse_column(reverse_row(matrix))
def lowerCAmelCase_ ( __A ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(__A ) )
# OR.. transpose(reverse_row(matrix))
def lowerCAmelCase_ ( __A ) -> list[list[int]]:
'''simple docstring'''
UpperCAmelCase__ = [list(__A ) for x in zip(*__A )]
return matrix
def lowerCAmelCase_ ( __A ) -> list[list[int]]:
'''simple docstring'''
UpperCAmelCase__ = matrix[::-1]
return matrix
def lowerCAmelCase_ ( __A ) -> list[list[int]]:
'''simple docstring'''
UpperCAmelCase__ = [x[::-1] for x in matrix]
return matrix
def lowerCAmelCase_ ( __A ) -> None:
'''simple docstring'''
for i in matrix:
print(*__A )
if __name__ == "__main__":
UpperCamelCase__ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
UpperCamelCase__ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
UpperCamelCase__ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
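# Quick sanity check (added for illustration): a 90-degree counterclockwise rotation
# is transpose followed by row reversal, so [[1, 2], [3, 4]] becomes [[2, 4], [1, 3]].
example_matrix = [[1, 2], [3, 4]]
transposed = [list(col) for col in zip(*example_matrix)]
rotated_ccw = transposed[::-1]
assert rotated_ccw == [[2, 4], [1, 3]]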
| 65 | import math
import random
def lowerCAmelCase_ ( __A, __A = False ) -> float:
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
UpperCamelCase__ = 0.0_2
def lowerCAmelCase_ ( __A, __A ) -> float:
'''simple docstring'''
UpperCAmelCase__ = float(2 * (random.randint(1, 100 )) - 1 )
for _ in range(__A ):
# Forward propagation
UpperCAmelCase__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCAmelCase__ = (expected / 100) - layer_a
# Error delta
UpperCAmelCase__ = layer_1_error * sigmoid_function(__A, __A )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = int(input('Expected value: '))
UpperCamelCase__ = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
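# Minimal sketch (added for illustration; not from the original file) of the
# single-weight update the training loop above performs: a sigmoid forward pass,
# an error term, and a delta scaled by the sigmoid derivative s * (1 - s).
import math
def sigmoid_sketch(value: float, deriv: bool = False) -> float:
    if deriv:  # here `value` is assumed to already be a sigmoid activation
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
initial_value = 0.02  # mirrors the constant defined above
w = 0.5
layer = sigmoid_sketch(initial_value * w)  # forward propagation
error = (50 / 100) - layer  # how much did we miss, for a target of 50?
delta = error * sigmoid_sketch(layer, deriv=True)  # error delta
w += initial_value * delta  # weight update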
| 65 | 1 |
import requests
from bs4 import BeautifulSoup
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
snake_case_ = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE , params=SCREAMING_SNAKE_CASE ).content , '''html.parser''' )
snake_case_ = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
snake_case_ = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
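    # the third anchor inside the "gs_fl" footer of a result is expected to hold the "Cited by N" text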
return anchors[2].get_text()
if __name__ == "__main__":
UpperCAmelCase = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params)) | 267 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
snake_case_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_encoder_blocks''' ) )
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[2, 2, 2, 2] , _UpperCAmelCase=[8, 4, 2, 1] , _UpperCAmelCase=[16, 32, 64, 1_28] , _UpperCAmelCase=[1, 4, 8, 16] , _UpperCAmelCase=[1, 2, 4, 8] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_encoder_blocks
snake_case_ = sr_ratios
snake_case_ = depths
snake_case_ = hidden_sizes
snake_case_ = downsampling_rates
snake_case_ = num_attention_heads
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
def UpperCamelCase__ ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = SegformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
snake_case_ = snake_case_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.num_labels
snake_case_ = SegformerForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = 1
snake_case_ = SegformerForSemanticSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCAmelCase )
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase__ ( self ):
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase__ ( self ):
snake_case_ = SegformerModelTester(self )
snake_case_ = SegformerConfigTester(self , config_class=_UpperCAmelCase )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_UpperCAmelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def UpperCamelCase__ ( self ):
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = outputs.attentions
snake_case_ = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
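            # queries attend over (image_size // 4) ** 2 patch tokens, while keys/values are
            # spatially reduced by sr_ratio, hence the (4 * sr_ratio) factor below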
snake_case_ = (self.model_tester.image_size // 4) ** 2
snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
snake_case_ = (self.model_tester.image_size // 32) ** 2
snake_case_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
snake_case_ = len(_UpperCAmelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCAmelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
snake_case_ = (self.model_tester.image_size // 4) ** 2
snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = outputs.hidden_states
snake_case_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ):
continue
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase__ ( self ):
pass
@slow
def UpperCamelCase__ ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SegformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCAmelCase ()-> List[str]:
"""simple docstring"""
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self ):
# only resize + normalize
snake_case_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCAmelCase )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
snake_case_ = model(_UpperCAmelCase )
snake_case_ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
# only resize + normalize
snake_case_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(_UpperCAmelCase )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
snake_case_ = model(_UpperCAmelCase )
snake_case_ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-1 ) )
@slow
def UpperCamelCase__ ( self ):
# only resize + normalize
snake_case_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCAmelCase )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
snake_case_ = model(_UpperCAmelCase )
snake_case_ = outputs.logits.detach().cpu()
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(5_00, 3_00)] )
snake_case_ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
snake_case_ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase ) | 267 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__lowercase = logging.get_logger(__name__)
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
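# Worked example (added for illustration): LayoutLM-style models expect boxes on a
# 0-1000 grid regardless of image size. For a 200x100 image, the pixel box
# (20, 10, 100, 50) maps to (100, 100, 500, 500).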
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = to_pil_image(SCREAMING_SNAKE_CASE )
    __UpperCamelCase , __UpperCamelCase = pil_image.size
__UpperCamelCase :str = pytesseract.image_to_data(SCREAMING_SNAKE_CASE , lang=SCREAMING_SNAKE_CASE , output_type='''dict''' , config=SCREAMING_SNAKE_CASE )
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
__UpperCamelCase :Dict = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE ) if not word.strip()]
__UpperCamelCase :Union[str, Any] = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
__UpperCamelCase :List[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
__UpperCamelCase :Dict = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
__UpperCamelCase :List[str] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
__UpperCamelCase :str = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__UpperCamelCase :int = []
for x, y, w, h in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :int = [x, y, x + w, y + h]
actual_boxes.append(SCREAMING_SNAKE_CASE )
# finally, normalize the bounding boxes
__UpperCamelCase :Optional[int] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 255 , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ) -> None:
super().__init__(**__lowercase)
__UpperCamelCase :Any = size if size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase :str = get_size_dict(__lowercase)
__UpperCamelCase :Tuple = do_resize
__UpperCamelCase :Tuple = size
__UpperCamelCase :Union[str, Any] = resample
__UpperCamelCase :str = do_rescale
__UpperCamelCase :Optional[int] = rescale_value
__UpperCamelCase :List[str] = do_normalize
__UpperCamelCase :Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCamelCase :str = image_std if image_std is not None else IMAGENET_STANDARD_STD
__UpperCamelCase :str = apply_ocr
__UpperCamelCase :Optional[Any] = ocr_lang
__UpperCamelCase :Optional[Any] = tesseract_config
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ) -> np.ndarray:
__UpperCamelCase :List[str] = get_size_dict(__lowercase)
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
__UpperCamelCase :Any = (size['''height'''], size['''width'''])
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
__UpperCamelCase :Optional[Any] = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase :Dict = size if size is not None else self.size
__UpperCamelCase :Union[str, Any] = get_size_dict(__lowercase)
__UpperCamelCase :List[Any] = resample if resample is not None else self.resample
__UpperCamelCase :Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase :List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase :str = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase :Any = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase :List[str] = image_std if image_std is not None else self.image_std
__UpperCamelCase :List[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
__UpperCamelCase :int = ocr_lang if ocr_lang is not None else self.ocr_lang
__UpperCamelCase :Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
__UpperCamelCase :Optional[int] = make_list_of_images(__lowercase)
if not valid_images(__lowercase):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''')
# All transformations expect numpy arrays.
__UpperCamelCase :Any = [to_numpy_array(__lowercase) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''')
__UpperCamelCase :str = []
__UpperCamelCase :Optional[Any] = []
for image in images:
            __UpperCamelCase , __UpperCamelCase = apply_tesseract(__lowercase , __lowercase , __lowercase)
words_batch.append(__lowercase)
boxes_batch.append(__lowercase)
if do_resize:
__UpperCamelCase :Dict = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase) for image in images]
if do_rescale:
__UpperCamelCase :Tuple = [self.rescale(image=__lowercase , scale=__lowercase) for image in images]
if do_normalize:
__UpperCamelCase :List[str] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase) for image in images]
__UpperCamelCase :Dict = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
__UpperCamelCase :Any = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase)
if apply_ocr:
__UpperCamelCase :List[Any] = words_batch
__UpperCamelCase :int = boxes_batch
return data
| 43 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def __SCREAMING_SNAKE_CASE ( A_ = 1_00_00_00 , A_ = 10 ):
lowerCAmelCase__ : defaultdict = defaultdict(A_ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
lowerCAmelCase__ : int = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
lowerCAmelCase__ : Tuple = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(A_ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F'''{solution() = }''')
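# Illustrative note (added; not from the original file): a hollow square lamina with
# outer side o and hole side h (same parity, 0 < h < o) uses o*o - h*h tiles; the
# solver above buckets these tile counts and keeps those reachable in 1 to 10 ways.
def lamina_tile_count(outer: int, hole: int) -> int:
    assert 0 < hole < outer and (outer - hole) % 2 == 0
    return outer * outer - hole * hole
assert lamina_tile_count(3, 1) == 8  # the smallest hollow lamina needs 8 tiles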
| 106 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
A_ : int = '\\n Text data.\n Second line of data.'
A_ : Dict = 'file'
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: Union[str, Any] ) -> List[Any]:
A__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A__ : List[Any] = bytes(lowercase_ , """utf-8""" )
with zstd.open(lowercase_ , """wb""" ) as f:
f.write(lowercase_ )
return path
@pytest.fixture
def UpperCamelCase (lowercase_: Any ) -> Optional[int]:
with open(os.path.join(tmpfs.local_root_dir , lowercase_ ) , """w""" ) as f:
f.write(lowercase_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCamelCase (lowercase_: Dict , lowercase_: Tuple , lowercase_: Any , lowercase_: str , lowercase_: str , lowercase_: int ) -> int:
A__ : Dict = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A__ : int = input_paths[compression_format]
A__ : int = tmp_path / """cache"""
A__ : Dict = DownloadConfig(cache_dir=lowercase_ , extract_compressed_file=lowercase_ )
A__ : int = cached_path(lowercase_ , download_config=lowercase_ )
with open(lowercase_ ) as f:
A__ : int = f.read()
with open(lowercase_ ) as f:
A__ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCamelCase (lowercase_: int , lowercase_: int , lowercase_: int , lowercase_: Union[str, Any] , lowercase_: Any ) -> Union[str, Any]:
A__ : Dict = """custom_cache"""
A__ : Optional[Any] = """custom_extracted_dir"""
A__ : Dict = tmp_path / """custom_extracted_path"""
if default_extracted:
A__ : Optional[int] = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , lowercase_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowercase_ ) )
A__ : Optional[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A__ : Any = xz_file
A__ : Tuple = (
DownloadConfig(extract_compressed_file=lowercase_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase_ )
)
A__ : List[str] = cached_path(lowercase_ , download_config=lowercase_ )
assert Path(lowercase_ ).parent.parts[-2:] == expected
def UpperCamelCase (lowercase_: List[Any] ) -> str:
# absolute path
A__ : Any = str(Path(lowercase_ ).resolve() )
assert cached_path(lowercase_ ) == text_file
# relative path
A__ : Any = str(Path(lowercase_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase_ ) == text_file
def UpperCamelCase (lowercase_: Optional[Any] ) -> Any:
# absolute path
A__ : str = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(lowercase_ ):
cached_path(lowercase_ )
# relative path
A__ : Optional[int] = """./__missing_file__.txt"""
with pytest.raises(lowercase_ ):
cached_path(lowercase_ )
def UpperCamelCase (lowercase_: Optional[int] ) -> Union[str, Any]:
A__ : Tuple = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(lowercase_ ) as f:
A__ : Any = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase_ )
def UpperCamelCase () -> str:
with pytest.raises(lowercase_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] ) -> Union[str, Any]:
A__ : Any = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase_ ):
http_get("""https://huggingface.co""" , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> int:
A__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase_ ):
ftp_get("""ftp://huggingface.co""" , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase_ )
def UpperCamelCase (lowercase_: Dict ) -> Dict:
A__ : str = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase_ ):
fsspec_get("""s3://huggingface.co""" , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
fsspec_head("""s3://huggingface.co""" )
| 141 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
A_ : List[Any] = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
A_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 141 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class _snake_case :
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any=13 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Dict=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=19 ,SCREAMING_SNAKE_CASE__ : str=32 ,SCREAMING_SNAKE_CASE__ : str=5 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : List[Any]=37 ,SCREAMING_SNAKE_CASE__ : List[str]="gelu" ,SCREAMING_SNAKE_CASE__ : str=0.1 ,SCREAMING_SNAKE_CASE__ : str=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=512 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=16 ,SCREAMING_SNAKE_CASE__ : Optional[int]=2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=4 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,):
SCREAMING_SNAKE_CASE:List[str] = parent
SCREAMING_SNAKE_CASE:Optional[Any] = batch_size
SCREAMING_SNAKE_CASE:Tuple = seq_length
SCREAMING_SNAKE_CASE:Tuple = is_training
SCREAMING_SNAKE_CASE:Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE:int = use_token_type_ids
SCREAMING_SNAKE_CASE:List[str] = use_labels
SCREAMING_SNAKE_CASE:Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE:List[Any] = hidden_size
SCREAMING_SNAKE_CASE:Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE:Tuple = num_attention_heads
SCREAMING_SNAKE_CASE:int = intermediate_size
SCREAMING_SNAKE_CASE:Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE:Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE:int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE:str = type_vocab_size
SCREAMING_SNAKE_CASE:Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE:Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE:Optional[int] = num_labels
SCREAMING_SNAKE_CASE:Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE:str = scope
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE:Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE:Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE:Any = None
SCREAMING_SNAKE_CASE:str = None
SCREAMING_SNAKE_CASE:int = None
if self.use_labels:
SCREAMING_SNAKE_CASE:List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE:List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE:Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE:Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Dict = EsmConfig(
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=SCREAMING_SNAKE_CASE__ ,esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} ,)
return config
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
SCREAMING_SNAKE_CASE:List[Any] = EsmForProteinFolding(config=SCREAMING_SNAKE_CASE__ ).float()
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:str = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:str = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE:List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( _a , _a , unittest.TestCase ):
_A : Dict = False
_A : Optional[int] = (EsmForProteinFolding,) if is_torch_available() else ()
_A : Optional[int] = ()
_A : int = {} if is_torch_available() else {}
_A : List[str] = False
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:Optional[int] = EsmFoldModelTester(self )
SCREAMING_SNAKE_CASE:str = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,hidden_size=37 )
def __UpperCamelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@unittest.skip("Does not support attention outputs" )
def __UpperCamelCase ( self : Any ):
pass
@unittest.skip
def __UpperCamelCase ( self : Dict ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def __UpperCamelCase ( self : List[Any] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def __UpperCamelCase ( self : Any ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def __UpperCamelCase ( self : int ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __UpperCamelCase ( self : Dict ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __UpperCamelCase ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __UpperCamelCase ( self : List[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __UpperCamelCase ( self : str ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __UpperCamelCase ( self : int ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def __UpperCamelCase ( self : List[Any] ):
pass
    @unittest.skip("ESMFold does not output hidden states in the normal way." )
def __UpperCamelCase ( self : int ):
pass
@unittest.skip("ESMFold only has one output format." )
def __UpperCamelCase ( self : Optional[int] ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def __UpperCamelCase ( self : str ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def __UpperCamelCase ( self : List[str] ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def __UpperCamelCase ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def __UpperCamelCase ( self : List[str] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def __UpperCamelCase ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def __UpperCamelCase ( self : str ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def __UpperCamelCase ( self : List[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : List[Any] ):
pass
@require_torch
class _snake_case ( _a ):
@slow
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:Dict = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
SCREAMING_SNAKE_CASE:str = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE:List[Any] = model(SCREAMING_SNAKE_CASE__ )["positions"]
SCREAMING_SNAKE_CASE:str = torch.tensor([2.5_828, 0.7_993, -10.9_334] ,dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 ) )
| 139 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Turn a test file path like `tests/models/x/test_modeling_x.py` into a dotted module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in the test module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes, i.e. those with a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a test class, or None."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model tester classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the model tester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the mappings above JSON-serializable by replacing classes with their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
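# --- Illustrative usage sketch (added; not part of the original module) ---
# Assuming this file is run from the root of a `transformers` checkout and a
# BERT test module exists at the path below, the helpers compose like this:
if __name__ == "__main__":
    import json

    bert_test_file = "tests/models/bert/test_modeling_bert.py"  # hypothetical example path
    mapping = get_model_to_tester_mapping(bert_test_file)
    print(json.dumps(to_json(mapping), indent=2))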
| 139 | 1 |
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of rows, one element at a time."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row, computing only the distinct left half
    of each row and mirroring it (each row is a palindrome)."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)

    return result


def benchmark() -> None:
    """Benchmark the two triangle generators over several input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
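# --- Illustrative example (added for clarity; not in the original file) ---
# Both generators produce identical rows; for num_rows=5:
#
#     >>> generate_pascal_triangle(5)
#     [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
#     >>> generate_pascal_triangle_optimized(5)
#     [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]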
| 97 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Get the writer batch size (the maximum parquet row group size) appropriate for the dataset features."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the pyarrow table as Parquet to a binary file handle.

        The caller is responsible for opening and closing the handle.
        """
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
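# --- Illustrative usage sketch (added; not part of the original module) ---
# In practice these classes back `Dataset.to_parquet` / `Dataset.from_parquet`.
# A direct round trip would look like this (file name is arbitrary):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
#     ParquetDatasetWriter(ds, "example.parquet").write()
#     reloaded = ParquetDatasetReader("example.parquet").read()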
| 97 | 1 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
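# --- Illustrative usage sketch (added; not part of the original module) ---
# Inside a model's test file the tester is typically wired up like this
# (BertConfig is only an example config class):
#
#     import unittest
#     from transformers import BertConfig
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()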
| 289 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak roberta's weights to our BERT structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
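# --- Illustrative invocation (added; not part of the original script) ---
# The checkpoint directory below is a hypothetical fairseq dump location:
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted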
| 25 | 0 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are fewer targets than requested predictions
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
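# --- Illustrative usage sketch (added; not part of the original module) ---
# The pipeline is normally built through the `pipeline()` factory; the model
# name below is just a common example checkpoint.
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     print(unmasker("The capital of France is <mask>.", top_k=3))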
| 362 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # Squeeze-and-excitation: pool to (1, 1), compute per-channel attention, rescale
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
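# --- Illustrative usage sketch (added; not part of the original module) ---
# Classifying an image with the checkpoint referenced in the docstrings above;
# the image URL is the usual COCO example, not something this module requires.
#
#     import requests
#     from PIL import Image
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])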
| 302 | 0 |
"""Tokenizer for RAG, wrapping a question-encoder tokenizer and a generator tokenizer."""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
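# --- Illustrative usage sketch (added; not part of the original module) ---
# Loading the combined tokenizer from a RAG checkpoint (example model name):
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")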
| 267 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # Faithful to the source: returns False (not None) when the queue is empty
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
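# --- Illustrative usage (added for clarity; not in the original file) ---
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")
    print(len(queue))       # 3
    print(queue.dequeue())  # a
    print(queue.first())    # b
    queue.enqueue("d")      # wraps around into the freed slot
    print(len(queue))       # 3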
| 267 | 1 |
"""Tests for the Flax BERT models."""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
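# --- Illustrative invocation (added; not part of the original test file) ---
# These tests are normally collected by pytest from a transformers checkout:
#
#   python -m pytest tests/models/bert/test_modeling_flax_bert.py -k test_model_from_pretrained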
| 220 |
"""A platform independent file lock (vendored copy of py-filelock 3.0.12)."""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def UpperCamelCase__ ( ):
"""simple docstring"""
global _logger
_lowerCAmelCase = _logger or logging.getLogger(__name__ )
return _logger
class UpperCAmelCase ( snake_case_ ):
def __init__( self : Optional[int] , __snake_case : str ) -> Optional[Any]:
_lowerCAmelCase = lock_file
return None
def __str__( self : Union[str, Any] ) -> Any:
_lowerCAmelCase = f"The file lock '{self.lock_file}' could not be acquired."
return temp
class UpperCAmelCase :
def __init__( self : str , __snake_case : str ) -> Tuple:
_lowerCAmelCase = lock
return None
def __enter__( self : Union[str, Any] ) -> Union[str, Any]:
return self.lock
def __exit__( self : Tuple , __snake_case : List[Any] , __snake_case : int , __snake_case : Optional[int] ) -> Union[str, Any]:
self.lock.release()
return None
class UpperCAmelCase :
def __init__( self : List[Any] , __snake_case : str , __snake_case : Tuple=-1 , __snake_case : List[str]=None ) -> Any:
_lowerCAmelCase = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_lowerCAmelCase = self.hash_filename_if_too_long(__snake_case , __snake_case )
# The path to the lock file.
_lowerCAmelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_lowerCAmelCase = None
# The default timeout value.
_lowerCAmelCase = timeout
# We use this lock primarily for the lock counter.
_lowerCAmelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_lowerCAmelCase = 0
return None
@property
def lowercase__ ( self : int ) -> List[str]:
return self._lock_file
@property
def lowercase__ ( self : Dict ) -> List[Any]:
return self._timeout
@timeout.setter
def lowercase__ ( self : Tuple , __snake_case : int ) -> Optional[Any]:
_lowerCAmelCase = float(__snake_case )
return None
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
raise NotImplementedError()
def lowercase__ ( self : Optional[Any] ) -> Any:
raise NotImplementedError()
@property
def lowercase__ ( self : str ) -> Dict:
return self._lock_file_fd is not None
def lowercase__ ( self : Union[str, Any] , __snake_case : Union[str, Any]=None , __snake_case : List[str]=0.05 ) -> List[str]:
# Use the default timeout, if no timeout is provided.
if timeout is None:
_lowerCAmelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_lowerCAmelCase = id(self )
_lowerCAmelCase = self._lock_file
_lowerCAmelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__snake_case )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_lowerCAmelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowercase__ ( self : Dict , __snake_case : Optional[int]=False ) -> Union[str, Any]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_lowerCAmelCase = id(self )
_lowerCAmelCase = self._lock_file
logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
_lowerCAmelCase = 0
logger().debug(f"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : List[str] ) -> Union[str, Any]:
self.acquire()
return self
def __exit__( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple , __snake_case : Any ) -> int:
self.release()
return None
def __del__( self : Optional[Any] ) -> Any:
self.release(force=__snake_case )
return None
def lowercase__ ( self : Optional[Any] , __snake_case : str , __snake_case : int ) -> str:
_lowerCAmelCase = os.path.basename(__snake_case )
if len(__snake_case ) > max_length and max_length > 0:
_lowerCAmelCase = os.path.dirname(__snake_case )
_lowerCAmelCase = str(hash(__snake_case ) )
_lowerCAmelCase = filename[: max_length - len(__snake_case ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__snake_case , __snake_case )
else:
return path
class UpperCAmelCase ( snake_case_ ):
def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int]=-1 , __snake_case : str=None ) -> Dict:
from .file_utils import relative_to_absolute_path
super().__init__(__snake_case , timeout=__snake_case , max_filename_length=__snake_case )
_lowerCAmelCase = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowercase__ ( self : List[str] ) -> Tuple:
_lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_lowerCAmelCase = os.open(self._lock_file , __snake_case )
except OSError:
pass
else:
try:
msvcrt.locking(__snake_case , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__snake_case )
else:
_lowerCAmelCase = fd
return None
def lowercase__ ( self : List[str] ) -> List[str]:
_lowerCAmelCase = self._lock_file_fd
_lowerCAmelCase = None
msvcrt.locking(__snake_case , msvcrt.LK_UNLCK , 1 )
os.close(__snake_case )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCAmelCase ( snake_case_ ):
def __init__( self : Dict , __snake_case : List[Any] , __snake_case : List[Any]=-1 , __snake_case : List[str]=None ) -> Dict:
_lowerCAmelCase = os.statvfs(os.path.dirname(__snake_case ) ).f_namemax
super().__init__(__snake_case , timeout=__snake_case , max_filename_length=__snake_case )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
_lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_lowerCAmelCase = os.open(self._lock_file , __snake_case )
try:
fcntl.flock(__snake_case , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__snake_case )
else:
_lowerCAmelCase = fd
return None
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_lowerCAmelCase = self._lock_file_fd
_lowerCAmelCase = None
fcntl.flock(__snake_case , fcntl.LOCK_UN )
os.close(__snake_case )
return None
class UpperCAmelCase ( snake_case_ ):
def lowercase__ ( self : Union[str, Any] ) -> Dict:
_lowerCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_lowerCAmelCase = os.open(self._lock_file , __snake_case )
except OSError:
pass
else:
_lowerCAmelCase = fd
return None
def lowercase__ ( self : Any ) -> Optional[Any]:
os.close(self._lock_file_fd )
_lowerCAmelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
A__ : Tuple =None
if msvcrt:
A__ : List[Any] =WindowsFileLock
elif fcntl:
A__ : Tuple =UnixFileLock
else:
A__ : Tuple =SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
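# Usage sketch (paths are illustrative; assumes the module above is importable
# as `filelock`): FileLock resolves to the strongest lock class available on
# the platform and is re-entrant within a process thanks to the lock counter.
from filelock import FileLock

with FileLock("shared_resource.txt.lock", timeout=10):
    with open("shared_resource.txt", "a") as fh:
        fh.write("serialized write\n")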
| 220 | 1 |
"""Swap two nodes of a singly linked list, identified by the values they hold."""
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # Insert a new node at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any):
        # Find the two nodes by value and exchange their payloads.
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
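# The demo above prints "1 2 3 4 5", then "4 2 3 1 5" after the swap.
# swap_nodes exchanges the data payloads, which is simple but moves values,
# not nodes. A pointer-relinking variant (a sketch using the same Node and
# LinkedList classes, covering the head and adjacent-node cases) could be:
def swap_nodes_by_relinking(ll: LinkedList, d1: Any, d2: Any) -> None:
    # Locate each target node together with its predecessor.
    prev_1, node_1 = None, ll.head
    while node_1 is not None and node_1.data != d1:
        prev_1, node_1 = node_1, node_1.next
    prev_2, node_2 = None, ll.head
    while node_2 is not None and node_2.data != d2:
        prev_2, node_2 = node_2, node_2.next
    if node_1 is None or node_2 is None or node_1 is node_2:
        return
    # Point each predecessor (or the head) at the other node ...
    if prev_1 is None:
        ll.head = node_2
    else:
        prev_1.next = node_2
    if prev_2 is None:
        ll.head = node_1
    else:
        prev_2.next = node_1
    # ... then swap the successors.
    node_1.next, node_2.next = node_2.next, node_1.next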
| 141 |
# docstyle-ignore
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 141 | 1 |
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
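# For context, the `calculate_bleu` helper imported above is assumed to be a
# thin wrapper around sacrebleu, roughly like this (a sketch, not necessarily
# the repository's exact code):
import sacrebleu


def calculate_bleu(output_lns, refs_lns):
    # Corpus-level BLEU between system outputs and a single reference stream.
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}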
| 84 |
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
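# Usage sketch (the model id is illustrative): the call below runs the full
# DDIM sampling loop defined above and returns PIL images by default.
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_generated.png")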
| 84 | 1 |
"""
Project Euler Problem 180: https://projecteuler.net/problem=180

Sum the distinct values of s(x, y, z) over all golden triples (x, y, z) of
order 35 and return the numerator plus the denominator of the total.
"""
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Check whether `number` is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the reduced numerator/denominator of the sum of three fractions."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Enumerate the four solvable exponents (n = 1, 2, -1, -2) for every pair
    of reduced fractions x, y up to `order`, derive z, and accumulate the
    distinct sums s(x, y, z)."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 97 |
| 97 | 1 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class a ( nn.Module ):
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = module
lowerCAmelCase = nn.Sequential(
nn.Linear(module.in_features , _snake_case , bias=_snake_case ) , nn.Linear(_snake_case , module.out_features , bias=_snake_case ) , )
lowerCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_snake_case )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCamelCase__ ( self , _snake_case , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.module(_snake_case , *_snake_case , **_snake_case ) + self.adapter(_snake_case )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
snake_case__ = '''bigscience/bloom-1b7'''
# Constant values
snake_case__ = 2.1_09_65_95_52_69_25_74
snake_case__ = '''Hello my name is'''
snake_case__ = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
snake_case__ = 1_0
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_abit.config
self.assertTrue(hasattr(_snake_case , 'quantization_config' ) )
lowerCAmelCase = config.to_dict()
lowerCAmelCase = config.to_diff_dict()
lowerCAmelCase = config.to_json_string()
def UpperCamelCase__ ( self ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
lowerCAmelCase = self.model_fpaa.get_memory_footprint()
lowerCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_snake_case , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
lowerCAmelCase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BitsAndBytesConfig()
lowerCAmelCase = True
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
lowerCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(_snake_case ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BitsAndBytesConfig()
with self.assertRaises(_snake_case ):
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_snake_case , load_in_abit=_snake_case , device_map='auto' , bnb_abit_quant_type='nf4' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_snake_case ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_snake_case ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_snake_case ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_snake_case ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
lowerCAmelCase = self.model_fpaa.to(torch.floataa )
lowerCAmelCase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
lowerCAmelCase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
lowerCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
lowerCAmelCase = self.model_fpaa.float()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_snake_case , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
lowerCAmelCase = 't5-small'
lowerCAmelCase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
lowerCAmelCase = 'Translate in German: Hello, my dog is cute'
def UpperCamelCase__ ( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
lowerCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCAmelCase = None
# test with `t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
# test with `flan-t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
lowerCAmelCase = modules
def UpperCamelCase__ ( self ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
# test with `flan-t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# model_name
lowerCAmelCase = 'bigscience/bloom-560m'
lowerCAmelCase = 't5-small'
# Different types of model
lowerCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
# Sequence classification model
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_snake_case , device_map='auto' )
# CausalLM model
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
# Seq2seq model
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_snake_case , device_map='auto' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCAmelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_snake_case , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
lowerCAmelCase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'facebook/opt-350m'
super().setUp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCAmelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCAmelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_snake_case ) ):
lowerCAmelCase = LoRALayer(module.q_proj , rank=16 )
lowerCAmelCase = LoRALayer(module.k_proj , rank=16 )
lowerCAmelCase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
lowerCAmelCase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCAmelCase = model.forward(**_snake_case )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_snake_case , _snake_case ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_snake_case , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class a ( a__ ):
snake_case__ = '''gpt2-xl'''
snake_case__ = 3.31_91_85_48_54_15_21_87
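# A hedged sketch of the quantized-loading path the tests above exercise
# (requires bitsandbytes and a CUDA GPU; the checkpoint name mirrors the tests):
from transformers import AutoModelForCausalLM, AutoTokenizer

model_8bit = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", load_in_8bit=True, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
print(tokenizer.decode(model_8bit.generate(**inputs, max_new_tokens=10)[0]))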
| 309 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 309 | 1 |
"""Fetch the citation count for a publication from Google Scholar."""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation anchor text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
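# Scholar's HTML changes without notice and the endpoint rate-limits
# aggressively; a more defensive variant (same imports and endpoint as above)
# guards each scraping step instead of assuming the markup:
def get_citation_safe(base_url: str, params: dict):
    response = requests.get(base_url, params=params, timeout=10)
    if response.status_code != 200:
        return None
    soup = BeautifulSoup(response.content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    if div is None:
        return None
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text() if len(anchors) > 2 else None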
| 93 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) )
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = MobileViTVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Any =(
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ):
__a = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(__lowercase ) , __lowercase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(__lowercase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__lowercase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowercase )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowercase )
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowercase )
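# A sketch of the inference path the integration tests above exercise
# (checkpoint as in the tests; exact class names may differ by transformers
# version):
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForSemanticSegmentation

processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(seg_map.shape)  # (height, width) tensor of PASCAL VOC class indices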
| 302 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A ( unittest.TestCase ):
def lowercase_ (self : str ) -> Union[str, Any]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__UpperCAmelCase , )
assert hasattr(self , "env" )
def lowercase_ (self : int , __UpperCAmelCase : List[str]=1 ) -> Optional[Any]:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def lowercase_ (self : Dict , __UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def lowercase_ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __UpperCAmelCase )
| 143 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
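# Lazy-loading sketch (illustrative): because the module object is swapped for
# a _LazyModule, importing the package stays cheap and the tokenizer submodule
# is only imported when its attribute is first accessed.
import transformers.models.bertweet as bertweet  # fast: submodule not loaded yet

tokenizer_cls = bertweet.BertweetTokenizer  # attribute access triggers the real import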
| 143 | 1 |
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : str ):
'''simple docstring'''
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(__snake_case )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_UpperCamelCase : Dict = 'Enter the base and the power separated by a comma: '
_UpperCamelCase, _UpperCamelCase : str = map(int, input(prompt).split(','))
_UpperCamelCase, _UpperCamelCase : int = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
_UpperCamelCase : Optional[Any] = res(xa, ya)
_UpperCamelCase : List[str] = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal')
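# Worked check of the log trick used above: the powers themselves are huge,
# but their base-10 logs compare cheaply.
#   log10(2**1000) = 1000 * log10(2) ≈ 301.03
#   log10(3**600)  =  600 * log10(3) ≈ 286.27
# so 2**1000 is the larger value:
assert res(2, 1000) > res(3, 600)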
| 220 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_UpperCamelCase : Tuple = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( __snake_case : Path , __snake_case : list ):
'''simple docstring'''
lowercase = '\n'.join(__snake_case )
Path(__snake_case ).open('w' ).writelines(__snake_case )
_UpperCamelCase : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
_UpperCamelCase : Union[str, Any] = 'sshleifer/bart-tiny-random'
_UpperCamelCase : Tuple = 'sshleifer/tiny-mbart'
_UpperCamelCase : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class a ( a_ ):
    def run_eval_tester( self , model ):
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
        with patch.object(sys , 'argv' , testargs ):
            run_generate()
        assert Path(output_file_name ).exists()
        # os.remove(Path(output_file_name))
    def test_run_eval( self ):
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow( self , model ):
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search( self , model ):
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / 'scores.json' )
        reference_path = str(tmp_dir / 'val.target' )
        _dump_articles(input_file_name , text['en'] )
        _dump_articles(reference_path , text['de'] )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'\n run_eval_search.py\n {model}\n {str(input_file_name )}\n {str(output_file_name )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(sys , 'argv' , testargs ):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if "translation" in task:
            expected_strings.append('bleu' )
        else:
            expected_strings.extend(ROUGE_KEYS )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
| 220 | 1 |
from __future__ import annotations
from typing import Any
class A__ :
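    # Fixed-capacity circular queue backed by a doubly linked list of Node objects;
    # the front/rear pointers chase each other around the ring instead of shifting data.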
    def __init__( self , UpperCamelCase__ = 6 ) -> None:
        '''simple docstring'''
        self.front = None
        self.rear = None
        self.create_linked_list(UpperCamelCase__ )
    def create_linked_list( self , UpperCamelCase__ ) -> None:
        '''simple docstring'''
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , UpperCamelCase__ ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        current_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
    def first( self ) -> Any | None:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
    def enqueue( self , data ) -> None:
        '''simple docstring'''
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ) -> Any:
        '''simple docstring'''
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
'''simple docstring'''
if self.is_empty():
raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class A__ :
def __init__( self ) -> None:
'''simple docstring'''
        self.data = None
        self.next = None
        self.prev = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
'''simple docstring'''
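# Project Euler 6: difference between (1+2+...+n)^2 and 1^2+2^2+...+n^2, using
# the closed forms n(n+1)(2n+1)/6 and (n(n+1)/2)^2 instead of explicit loops.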
def solution( n = 1_00 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 101 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
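# DummyOptim / DummyScheduler are placeholders used when the DeepSpeed config
# already specifies its own optimizer/scheduler, so Accelerate can wire them in.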
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 1_6 , model_name_or_path : str = "bert-base-cased" ) -> Any:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function( config , args ) -> Optional[Any]:
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
# Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("""glue""" , """mrpc""" )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
        performance_metric[f"""epoch-{epoch}"""] = eval_metric["""accuracy"""]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
            json.dump(performance_metric , f )
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--performance_lower_bound""" , type=float , default=None , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=3 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
| 84 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve( ) -> Generator[int, None, None]:
'''simple docstring'''
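    # Incremental sieve: factor_map maps each upcoming composite to one of its
    # prime factors; a number with no entry when reached must itself be prime.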
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit : float = 1E10 ) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
| 84 | 1 |
cache = {}
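# Memo for _calculate: maps a (days, absent, late) state to the number of valid
# prize strings reachable from it, so overlapping subproblems are solved once.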
def _calculate( days , absent , late ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution( days = 30 ):
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 300 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
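    # Dense row-major matrix supporting +, -, *, transpose and rank-1 inverse updates.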
    def __init__( self, row, column, default_value = 0):
        '''simple docstring'''
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__( self):
        '''simple docstring'''
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self):
'''simple docstring'''
return str(self)
    def validate_indicies( self, loc):
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self, loc):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__( self, loc, value):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__( self, another):
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self):
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self, another):
        '''simple docstring'''
        return self + (-another)
    def __mul__( self, another):
        '''simple docstring'''
        if isinstance(another, (int, float)): # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix): # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose( self):
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
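    # Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # where this matrix already holds A^(-1) and u, v are column vectors.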
    def sherman_morrison( self, u, v):
        '''simple docstring'''
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row # u, v should be column vector
        assert u.column == v.column == 1 # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa( ):
        '''simple docstring'''
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F"u is {u}" )
        print(F"v is {v}" )
        print(F"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}" )
def A ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 300 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class a_ (nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        self.conv = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class a_ (nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        self.conv = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
return hidden_states
class a_ (nn.Module ):
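    # Pre-activation ResNet block (GroupNorm -> swish -> conv, twice) that injects a
    # projected time embedding between the two convolutions, as in diffusion UNets.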
    in_channels : int
    out_channels : int = None
    dropout_prob : float = 0.0
    use_nin_shortcut : bool = None
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norma = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
        self.conva = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        # normb/convb stand in for the second norm/conv pair of the block
        self.normb = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.convb = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
    def __call__( self , hidden_states , temb , deterministic=True ):
        residual = hidden_states
        hidden_states = self.norma(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conva(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.normb(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.convb(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
| 309 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
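# Conversion flow: build a HF BLIP-2 config, rename the LAVIS vision/Q-Former keys,
# splice the q/v biases into a single qkv bias, then verify logits and generation.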
def load_demo_image( ) -> Tuple:
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys( config : Any ) -> Dict:
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ) -> Optional[Any]:
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ) -> Tuple:
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config( model_name , eos_token_id ) -> List[Any]:
    image_size = 3_64 if """coco""" in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> List[str]:
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
    name, type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
original_model.eval()
print("""Done!""" )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 309 | 1 |
"""simple docstring"""
def remove_duplicates( key ):
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key ):
    """simple docstring"""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message , cipher_map ):
    """simple docstring"""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message , cipher_map ):
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main( ):
    """simple docstring"""
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 350 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__A : List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
__A : List[Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 57 | 0 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
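# Divide-and-conquer maximum subarray: split at the midpoint, solve both halves
# recursively, and compare against the best sum crossing the midpoint (O(n log n)).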
def max_subarray( arr , low , high ) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr , low , mid )
    right_low, right_high, right_sum = max_subarray(arr , mid + 1 , high )
    cross_left, cross_right, cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum( arr , low , mid , high ) -> tuple[int, int, float]:
    left_sum, max_left = float('-inf' ), -1
    right_sum, max_right = float('-inf' ), -1
    summ: int | float = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray( input_size ) -> float:
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
def UpperCamelCase__ ( ) -> None:
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '\t\t' , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel('Number of Inputs' )
    plt.ylabel('Time taken in seconds' )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 143 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_speecht5'''] = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speecht5'''] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 | 1 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCamelCase_ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCamelCase_ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCamelCase_ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ),
} ), )
    def _compute( self, predictions, references, min_len = 1, max_len = 4, ):
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len )
}
| 246 |
'''simple docstring'''
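# The n-th Catalan number, C(2n, n) / (n + 1), counts binary search trees on n
# nodes; multiplying by n! gives the number of (labelled) binary trees.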
def binomial_coefficient( n: int , k: int ):
    """simple docstring"""
    result = 1 # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count: int ):
    """simple docstring"""
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n: int ):
    """simple docstring"""
    if n < 0:
        raise ValueError('factorial() not defined for negative values' )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count: int ):
    """simple docstring"""
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
UpperCamelCase_ = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 246 | 1 |
"""simple docstring"""
def nor_gate( input_a: int , input_b: int ):
    return int(input_a == input_b == 0 )
def main( ):
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 148 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class lowercase :
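    # Minimal port of fairseq's Dictionary: a symbol table with special tokens,
    # usage counts, and a loader for fairseq's "<token> <count>" dict files.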
    def __init__( self ,*, # begin keyword-only arguments
    bos="<s>" ,pad="<pad>" ,eos="</s>" ,unk="<unk>" ,extra_special_symbols=None ,):
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__( self ,other):
        return self.indices == other.indices
    def __getitem__( self ,idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word
def __len__( self):
return len(self.symbols)
    def __contains__( self ,sym):
        return sym in self.indices
@classmethod
    def load( cls ,f):
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol( self ,word ,n=1 ,overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta( self ,lines):
        return 0
    def add_from_file( self ,f):
        if isinstance(f ,str):
            try:
                with open(f ,'''r''' ,encoding='''utf-8''') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(''' ''' ,1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(''' ''' ,1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(word))
                self.add_symbol(word ,n=count ,overwrite=overwrite)
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''')
def rewrite_dict_keys( d ):
    '''simple docstring'''
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(R'''@@$''' , '''''' , k ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , k ), v) for k, v in d.items() )
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k] # restore
    return da
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , '''checkpoint.pt''' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
    chkpt = torch.load(checkpoint_file , map_location='''cpu''' )
    args = chkpt['''cfg''']['''model''']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , '''dict.txt''' )
    if not os.path.isfile(dict_file ):
        raise ValueError(f'path to the file {dict_file} does not exist!' )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''vocab_file'''] )
    print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
    with open(src_vocab_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , '''bpecodes''' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''merges_file'''] )
    shutil.copyfile(bpecodes_file , merges_file )
# model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , '''config.json''' )
    model_conf = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1E-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f'Generating {biogpt_model_config_file}' )
    with open(biogpt_model_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
# tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1024,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f'Generating {biogpt_tokenizer_config_file}' )
    with open(biogpt_tokenizer_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
# model
lowercase = chkpt['''model''']
# remove unneeded keys
lowercase = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
lowercase = model_state_dict.pop(lowerCAmelCase__ )
else:
lowercase = model_state_dict.pop(lowerCAmelCase__ )
lowercase = BioGptConfig.from_pretrained(lowerCAmelCase__ )
lowercase = BioGptForCausalLM(lowerCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase__ )
# save
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print('''Conversion is done!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
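
# Illustrative follow-up (not part of the original script; the folder name below
# is a hypothetical example): once conversion has run, the dump folder can be
# loaded with the regular transformers classes.
#
#   from transformers import BioGptForCausalLM, BioGptTokenizer
#
#   tokenizer = BioGptTokenizer.from_pretrained("converted/biogpt")
#   model = BioGptForCausalLM.from_pretrained("converted/biogpt")
#   inputs = tokenizer("COVID-19 is", return_tensors="pt")
#   print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))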
| 101 | 0 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
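
    # Worked example (added for illustration): [2, 4, 6] has common difference 2,
    # so it is arithmetic and its mean is (2 + 4 + 6) / 3 = 4.0.
    assert is_arithmetic_series([2, 4, 6]) is True
    assert is_arithmetic_series([2, 4, 7]) is False
    assert arithmetic_mean([2, 4, 6]) == 4.0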
| 358 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_data2vec_audio'''] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_text'''] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_vision'''] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['''modeling_tf_data2vec_vision'''] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
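
# How the lazy pattern above behaves (illustrative sketch; the real resolution
# logic lives in transformers.utils._LazyModule):
#
#   import transformers.models.data2vec as data2vec  # cheap: no submodule imported yet
#   data2vec.Data2VecTextConfig                      # first access imports configuration_data2vec_text
#   data2vec.Data2VecTextConfig                      # later accesses hit the cached attribute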
| 107 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor step (explicit Euler)
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector step: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
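
    # Worked example (illustration, not part of the original module): integrate
    # y' = y with y(0) = 1 up to x = 1; the result should approach e ~ 2.71828.
    import math

    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
    assert abs(approx - math.e) < 1e-3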
| 300 |
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                # restart scanning from the first position after each swap
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
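
    # Illustrative check (added): for 3 elements Heap's algorithm yields all
    # 3! = 6 permutations, e.g. heaps([1, 2, 3]) returns
    # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)].
    assert len(heaps([1, 2, 3])) == 6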
| 300 | 1 |
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("""Number of terms """, solution(int(str(input()).strip())))
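
    # Sanity check (illustrative): the Project Euler 29 statement lists exactly
    # 15 distinct values of a**b for 2 <= a <= 5 and 2 <= b <= 5.
    assert solution(5) == 15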
| 361 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
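
# Illustrative companion snippet (not part of the original script): the pickle
# written above can be read back for training. The file name mirrors the
# f"{args.dump_file}.{args.tokenizer_name}.pickle" pattern and is hypothetical.
#
#   with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
#       sequences = pickle.load(fp)  # a shuffled list of uint16/int32 token-id arrays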
| 70 | 0 |
from ..utils import DummyObject, requires_backends
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ["""torch"""]
def __init__( self : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[int] ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Optional[int] , **lowerCamelCase : Tuple ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""torch"""]
def __init__( self : Dict , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Any ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : List[Any] , **lowerCamelCase : Dict ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : int , **lowerCamelCase : int ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : str , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[str] ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : int , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : str ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Any ) -> Any:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : int , **lowerCamelCase : Dict ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[int] ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : int , **lowerCamelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = ["""torch"""]
def __init__( self : str , *lowerCamelCase : Dict , **lowerCamelCase : Optional[Any] ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> int:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : List[str] , **lowerCamelCase : str ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : List[Any] , **lowerCamelCase : int ) -> Tuple:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : int = ["""torch"""]
def __init__( self : Dict , *lowerCamelCase : Optional[int] , **lowerCamelCase : Tuple ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : Dict , **lowerCamelCase : List[str] ) -> List[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["""torch"""]
def __init__( self : Dict , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[int] ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Any , **lowerCamelCase : int ) -> str:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : str ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> Dict:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : int = ["""torch"""]
def __init__( self : List[str] , *lowerCamelCase : Dict , **lowerCamelCase : List[str] ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Dict ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : List[str] , **lowerCamelCase : Tuple ) -> str:
requires_backends(cls , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
requires_backends(_UpperCamelCase , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : str , *lowerCamelCase : int , **lowerCamelCase : Any ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : int , **lowerCamelCase : List[str] ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : List[Any] ) -> Tuple:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["""torch"""]
def __init__( self : str , *lowerCamelCase : Dict , **lowerCamelCase : Optional[int] ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : List[str] , **lowerCamelCase : Any ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> Tuple:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""torch"""]
def __init__( self : Any , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : str ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : int = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Any ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : str , **lowerCamelCase : str ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Optional[int] ) -> int:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["""torch"""]
def __init__( self : Dict , *lowerCamelCase : Optional[Any] , **lowerCamelCase : int ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ) -> List[str]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : Any , **lowerCamelCase : str ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : int , *lowerCamelCase : List[Any] , **lowerCamelCase : str ) -> int:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = ["""torch"""]
def __init__( self : int , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Tuple ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : List[str] , **lowerCamelCase : Dict ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : str , **lowerCamelCase : str ) -> List[str]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : int , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : Tuple , **lowerCamelCase : List[str] ) -> List[str]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = ["""torch"""]
def __init__( self : int , *lowerCamelCase : Optional[int] , **lowerCamelCase : List[str] ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : int , **lowerCamelCase : int ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> str:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""torch"""]
def __init__( self : Dict , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Any , **lowerCamelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ) -> List[str]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = ["""torch"""]
def __init__( self : Optional[int] , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : str , **lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Dict , **lowerCamelCase : int ) -> Any:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = ["""torch"""]
def __init__( self : Optional[int] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : int , *lowerCamelCase : int , **lowerCamelCase : Tuple ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : str , **lowerCamelCase : Optional[int] ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> int:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCamelCase : str , **lowerCamelCase : Any ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : List[str] , **lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""torch"""]
def __init__( self : List[str] , *lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Any , **lowerCamelCase : int ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : int , *lowerCamelCase : str , **lowerCamelCase : List[Any] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : int = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[str] ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : int , **lowerCamelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = ["""torch"""]
def __init__( self : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : str , **lowerCamelCase : int ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : int , **lowerCamelCase : Dict ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : Optional[int] , **lowerCamelCase : str ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : int , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> int:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : Dict ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : int , *lowerCamelCase : Any , **lowerCamelCase : Any ) -> Any:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = ["""torch"""]
def __init__( self : Dict , *lowerCamelCase : str , **lowerCamelCase : List[Any] ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : str , **lowerCamelCase : Tuple ) -> Dict:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : int , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Dict ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Dict ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Any , **lowerCamelCase : str ) -> List[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : Tuple , *lowerCamelCase : Optional[int] , **lowerCamelCase : str ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Optional[Any] ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : List[str] , **lowerCamelCase : Tuple ) -> Any:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCamelCase : str , **lowerCamelCase : Tuple ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> Any:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : int ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : Optional[int] ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ) -> Dict:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = ["""torch"""]
def __init__( self : str , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Union[str, Any] ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Tuple ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : str , **lowerCamelCase : Any ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Any , **lowerCamelCase : List[str] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = ["""torch"""]
def __init__( self : Any , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Any , **lowerCamelCase : int ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : int , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Tuple ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : int = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : List[str] , **lowerCamelCase : Optional[int] ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : str ) -> Dict:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = ["""torch"""]
def __init__( self : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Any , **lowerCamelCase : Any ) -> Any:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""torch"""]
def __init__( self : Optional[int] , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ) -> List[str]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["""torch"""]
def __init__( self : List[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : int , **lowerCamelCase : List[Any] ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : str ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : List[str] ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : str , **lowerCamelCase : Optional[int] ) -> int:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : str ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : int , **lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : int ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Any ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Dict ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Dict , **lowerCamelCase : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Tuple ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Optional[int] , **lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCamelCase : List[Any] , **lowerCamelCase : str ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Optional[int] , **lowerCamelCase : int ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : str ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : str , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : List[str] , **lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ) -> Dict:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = ["""torch"""]
def __init__( self : Tuple , *lowerCamelCase : Optional[int] , **lowerCamelCase : Tuple ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Optional[Any] ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : Optional[int] , **lowerCamelCase : int ) -> List[str]:
requires_backends(cls , ["torch"] )
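
# Why the dummy-object pattern above works (illustrative sketch, simplified from
# the real transformers.utils.DummyObject; the backend list is assumed to be the
# `_backends` class attribute, mangled in the classes above): attribute lookups
# on the *class itself* are routed through the metaclass, so even a call such as
# `SomeDummy.from_pretrained(...)` raises a clear error when torch is missing.
#
#   class DummyObject(type):
#       def __getattr__(cls, key):
#           if key.startswith("_"):
#               return super().__getattr__(key)
#           requires_backends(cls, cls._backends)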
| 123 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
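
# Illustrative usage outside the test harness (the model name is the same tiny
# checkpoint exercised above):
#
#   from transformers import pipeline
#
#   t2t = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   t2t("Something there", do_sample=False)  # -> [{"generated_text": ""}]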
| 57 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
def __init__( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : str=13 , lowercase_ : List[Any]=7 , lowercase_ : List[Any]=True , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Any=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=5 , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=37 , lowercase_ : Dict="gelu" , lowercase_ : str=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=128 , lowercase_ : Optional[Any]=32 , lowercase_ : Optional[int]=16 , lowercase_ : List[Any]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=4 , lowercase_ : Any=None , ):
lowercase_ : Tuple = parent
lowercase_ : List[str] = batch_size
lowercase_ : Optional[int] = seq_length
lowercase_ : int = is_training
lowercase_ : str = use_input_mask
lowercase_ : str = use_token_type_ids
lowercase_ : Tuple = use_labels
lowercase_ : Dict = vocab_size
lowercase_ : str = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Optional[Any] = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : str = max_position_embeddings
lowercase_ : Tuple = type_vocab_size
lowercase_ : str = type_sequence_label_size
lowercase_ : List[Any] = initializer_range
lowercase_ : Any = num_labels
lowercase_ : Optional[int] = num_choices
lowercase_ : Dict = scope
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Union[str, Any] = None
if self.use_input_mask:
lowercase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[Any] = None
if self.use_token_type_ids:
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Union[str, Any] = None
lowercase_ : Any = None
lowercase_ : Optional[int] = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : int , lowercase_ : str ):
lowercase_ : List[Any] = NezhaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
lowercase_ : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , ):
lowercase_ : int = True
lowercase_ : Any = NezhaModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , )
lowercase_ : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , )
lowercase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Union[str, Any] ):
lowercase_ : int = NezhaForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] ):
lowercase_ : str = NezhaForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ):
lowercase_ : int = NezhaForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Optional[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : str ):
lowercase_ : List[str] = NezhaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Any = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] ):
lowercase_ : Optional[Any] = self.num_labels
lowercase_ : Tuple = NezhaForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[str] , lowercase_ : int , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[int] ):
lowercase_ : Dict = self.num_labels
lowercase_ : Optional[int] = NezhaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Tuple = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Optional[int] ):
lowercase_ : Union[str, Any] = self.num_choices
lowercase_ : Optional[Any] = NezhaForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Tuple = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Tuple=False ):
lowercase_ : Dict = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[int] = NezhaModelTester(self )
lowercase_ : Dict = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = NezhaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
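
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the torchscript
# test above traces a model on CPU and reloads it on another device via
# map_location. The same pattern works for any torch.nn.Module; the tiny
# model below is a stand-in assumption, not the Nezha architecture.
import tempfile as _tempfile

import torch as _torch


class _TinyModel(_torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = _torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)


def _trace_and_reload_demo():
    model = _TinyModel().eval()
    example = _torch.randn(1, 4)
    traced = _torch.jit.trace(model, (example,))
    with _tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "tiny.pt")
        _torch.jit.save(traced, path)
        # map_location moves the traced graph to the target device on load
        loaded = _torch.jit.load(path, map_location="cpu")
        print(loaded(example))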
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
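
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file): the init above delays the
# heavy submodule imports until an attribute is first accessed. A minimal
# version of that idea, assuming only the standard library:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map "attribute name" -> "submodule that defines it"
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        # the submodule is only imported here, on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)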
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image batch to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
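
# ---------------------------------------------------------------------------
# Illustrative usage (not from the original file): round-tripping a random
# batch through the helpers above. Assumes torch is installed, which
# pt_to_pil implicitly requires anyway.
if __name__ == "__main__":
    import torch

    batch = torch.rand(2, 3, 8, 8) * 2 - 1  # fake images in [-1, 1], NCHW
    pil_images = pt_to_pil(batch)
    print([im.size for im in pil_images])  # -> [(8, 8), (8, 8)]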
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
        function(*args)
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
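
# ---------------------------------------------------------------------------
# Illustrative usage (not from the original file): in a notebook you would
# define a training function and hand it to notebook_launcher. The body below
# is a placeholder assumption, not a real training loop; the launch call is
# left commented out because it spawns worker processes.
def _toy_training_function(learning_rate=1e-3):
    print(f"training with lr={learning_rate}")


# notebook_launcher(_toy_training_function, args=(1e-4,), num_processes=2)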
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1; otherwise output 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
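
# ---------------------------------------------------------------------------
# Illustrative extension (not from the original file): NAND is functionally
# complete, so the other basic gates can be built from nand_gate alone.
def not_gate_from_nand(a: int) -> int:
    return nand_gate(a, a)


def and_gate_from_nand(a: int, b: int) -> int:
    return not_gate_from_nand(nand_gate(a, b))


def or_gate_from_nand(a: int, b: int) -> int:
    return nand_gate(not_gate_from_nand(a), not_gate_from_nand(b))


assert and_gate_from_nand(1, 1) == 1 and and_gate_from_nand(1, 0) == 0
assert or_gate_from_nand(0, 0) == 0 and or_gate_from_nand(0, 1) == 1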
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
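
# ---------------------------------------------------------------------------
# Illustrative usage (not from the original file): instantiating the config
# with defaults and reading the attributes derived in __init__ above.
if __name__ == "__main__":
    config = DinatConfig()
    print(config.num_layers)   # 4, one entry per stage in `depths`
    print(config.hidden_size)  # 512 = 64 * 2 ** 3 for the default embed_dim
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']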
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" RetriBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
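
# ---------------------------------------------------------------------------
# Illustrative usage (not from the original file): how the two overrides above
# combine token id lists for a sequence pair. Downloads the checkpoint, so it
# is a sketch rather than a test; the hub id comes from the map above.
if __name__ == "__main__":
    tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hi"))
    print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))   # [CLS] a [SEP] b [SEP]
    print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))  # 0s then 1s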
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.372, -12.787, -13.477], [-12.536, -14.194, -14.409], [-13.217, -14.888, -15.327]],
                [[-14.791, -17.122, -18.277], [-17.163, -19.192, -19.533], [-17.897, -19.991, -20.315]],
                [[0.76723, 0.41921, -0.077878], [0.47772, 0.0095557, -0.28082], [0.36032, -0.24826, -0.51168]],
            ]
        )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
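
# ---------------------------------------------------------------------------
# Illustrative invocation (not from the original file). The checkpoint path is
# a placeholder assumption; substitute the .pth file you actually downloaded:
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path /path/to/segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0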
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
            ],
        )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
            ],
        )
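
# ---------------------------------------------------------------------------
# Illustrative usage (not from the original test file): the same hashing trick
# used above, applied to a raw pipeline run. This downloads a large SAM
# checkpoint, so it is a sketch rather than something to run casually.
if __name__ == "__main__":
    segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
    result = segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
    summaries = [mask_to_test_readable(mask) for mask in result["masks"]]
    print(summaries[:3])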
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Check whether `number` is prime."""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that violate Goldbach's other conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that cannot be written as prime + 2 * i * i."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
from __future__ import annotations
A : Dict = "#"
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ) -> None:
SCREAMING_SNAKE_CASE_ = {}
def __A ( self : List[Any] , __magic_name__ : str ) -> None:
SCREAMING_SNAKE_CASE_ = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = trie[char]
SCREAMING_SNAKE_CASE_ = True
def __A ( self : Union[str, Any] , __magic_name__ : str ) -> tuple | list:
SCREAMING_SNAKE_CASE_ = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE_ = trie[char]
else:
return []
return self._elements(__magic_name__ )
def __A ( self : int , __magic_name__ : dict ) -> tuple:
SCREAMING_SNAKE_CASE_ = []
for c, v in d.items():
SCREAMING_SNAKE_CASE_ = [" "] if c == END else [(c + s) for s in self._elements(__magic_name__ )]
result.extend(__magic_name__ )
return tuple(__magic_name__ )
A : Union[str, Any] = Trie()
A : Optional[int] = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
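
# ---------------------------------------------------------------------------
# Illustrative session (not from the original file): prefix lookup returns the
# completions of every inserted word that starts with the prefix; the trailing
# space comes from the END marker in _elements.
#
#   >>> autocomplete_using_trie("de")
#   ('depart ', 'detergent ', 'deer ', 'deal ')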
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 305 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 48 |
def sum_of_series(first_term, common_diff, num_of_terms) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))
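# The closed form above is the arithmetic-series sum S_n = n / 2 * (2a + (n - 1)d).
# A brute-force cross-check (illustrative, not part of the original module):
def _check_sum_of_series() -> None:
    assert sum_of_series(1, 1, 10) == sum(range(1, 11))  # 55
    assert sum_of_series(5, 3, 4) == 5 + 8 + 11 + 14  # 38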
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__UpperCamelCase : Union[str, Any] = "<<<<<<< This should probably be modified because it mentions: "
__UpperCamelCase : Optional[int] = "=======\n>>>>>>>\n"
__UpperCamelCase : Optional[Any] = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
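# Illustration (not part of the converter) of how a single TO_CONVERT rule
# rewrites a line; the sample line is hypothetical:
# >>> re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')",
# ...        "text = tfds.features.Text()")
# "text = datasets.Value('string')"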
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
SCREAMING_SNAKE_CASE : List[Any] = f_name.replace('''.py''' , '''''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(a__ , a__ )
SCREAMING_SNAKE_CASE : str = os.path.join(a__ , a__ )
os.makedirs(a__ , exist_ok=a__ )
self._logger.info(f"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(a__ )
if needs_manual_update:
with_manual_update.append(a__ )
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(a__ )
self._logger.info(f"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"""You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.""" )
| 354 | import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 258 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the payloads; the links stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
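# Expected behaviour of the demo above: push() prepends, so pushing 5..1 yields
# "1 2 3 4 5", and swapping the payloads of the nodes holding 1 and 4 prints
# "4 2 3 1 5". Swapping data rather than links keeps this O(n) with no special
# cases for head or adjacent nodes. A minimal check (illustrative):
# >>> demo = LinkedList()
# >>> for value in (3, 2, 1):
# ...     demo.push(value)
# >>> demo.swap_nodes(1, 3)
# >>> demo.print_list()
# 3 2 1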
| 183 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
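# Example invocation (illustrative; the script name is hypothetical and the
# token needs actions:read permission):
#
#   python check_runner_status.py --target_runners runner-a,runner-b --token "$GITHUB_TOKEN"
#
# The API response parsed above has the shape
# {"runners": [{"name": "...", "status": "online" | "offline", ...}, ...]}.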
| 183 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
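# Hedged usage sketch: the pipeline expects a D4RL-style env plus pretrained
# value-function/UNet/scheduler components. Every name below is a placeholder,
# not a verified checkpoint or API guarantee.
#
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)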
| 73 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
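# With the table above in place, a plain import such as
# "from transformers.models.blenderbot_small import BlenderbotSmallTokenizer"
# stays cheap: _LazyModule defers the submodule import until the attribute is
# first touched (a simplified sketch of the mechanism, not code from this file):
#
#   mod = sys.modules["transformers.models.blenderbot_small"]
#   tok_cls = mod.BlenderbotSmallTokenizer  # real import happens here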
| 73 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
"""simple docstring"""
super().__init__(**_UpperCAmelCase)
__lowerCAmelCase : Any = num_channels
__lowerCAmelCase : List[Any] = patch_sizes
__lowerCAmelCase : int = patch_stride
__lowerCAmelCase : List[Any] = patch_padding
__lowerCAmelCase : Any = embed_dim
__lowerCAmelCase : List[str] = num_heads
__lowerCAmelCase : Tuple = depth
__lowerCAmelCase : int = mlp_ratio
__lowerCAmelCase : List[Any] = attention_drop_rate
__lowerCAmelCase : Optional[int] = drop_rate
__lowerCAmelCase : str = drop_path_rate
__lowerCAmelCase : Tuple = qkv_bias
__lowerCAmelCase : str = cls_token
__lowerCAmelCase : Optional[int] = qkv_projection_method
__lowerCAmelCase : Any = kernel_qkv
__lowerCAmelCase : Any = padding_kv
__lowerCAmelCase : str = stride_kv
__lowerCAmelCase : Tuple = padding_q
__lowerCAmelCase : List[str] = stride_q
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : Dict = layer_norm_eps | 269 |
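# Minimal usage sketch for CvtConfig above: the defaults reproduce the CvT-13
# layout, and individual fields can be overridden by keyword (values below are
# illustrative only):
#
#   config = CvtConfig(num_channels=3, depth=[1, 4, 16])
#   assert config.model_type == "cvt"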
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 89 | 0 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=("train",) ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
lowerCAmelCase : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
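# Self-contained round-trip sketch of the writer/reader pair exercised above
# (meant to be run outside pytest; the file name is arbitrary):
def _parquet_roundtrip_example(tmp_dir: str) -> None:
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ParquetDatasetWriter(ds, f"{tmp_dir}/demo.parquet").write()
    reloaded = ParquetDatasetReader(f"{tmp_dir}/demo.parquet").read()
    assert reloaded.column_names == ["col_1", "col_2"]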
| 133 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
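# Usage sketch: fine-tuning heads are configured purely through these fields;
# the values below are illustrative, not a published recipe:
#
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   assert config.num_aggregation_labels == 4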
| 133 | 1 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 335 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})
    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})
    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 305 | 0 |
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
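# Worked check for the function above: with zero phase angles both rectangular
# forms are purely real, so the product is purely real as well.
# >>> apparent_power(100, 5, 0, 0)
# (500+0j)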
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
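# Hedged standalone sketch of the PRK -> PLMS split these tests exercise. A
# random tensor stands in for a real denoising model; this is not runnable from
# inside this test module because of its relative import:
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.prk_timesteps:
#       residual = torch.randn_like(sample)  # model(sample, t) in real use
#       sample = scheduler.step_prk(residual, t, sample).prev_sample
#   for t in scheduler.plms_timesteps:
#       residual = torch.randn_like(sample)
#       sample = scheduler.step_plms(residual, t, sample).prev_sample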
| 23 | 0 |
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # Enumerate pixels in row-major order: index -> (x, y) coordinates
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates into [-1, 1] and scale by the half-FOV tangent
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
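# Hedged usage sketch: build the 20-view orbit rig above and inspect one batch
# of rays; each ray is an (origin, direction) pair, hence the trailing (2, 3).
if __name__ == "__main__":
    demo_cameras = create_pan_cameras(64)
    demo_rays = demo_cameras.camera_rays
    print(demo_rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3])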
| 85 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
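# Hedged composition sketch: other gates fall out of or_gate directly, e.g. NOR:
#
#   def nor_gate(input_1: int, input_2: int) -> int:
#       return int(not or_gate(input_1, input_2))
#
#   assert nor_gate(0, 0) == 1 and nor_gate(1, 1) == 0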
| 258 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""GLPNFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
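# Illustration of the lazy-import pattern above (hedged): `_LazyModule` replaces
# this module in sys.modules and resolves attributes through `_import_structure`
# on first access, so heavy backends are only imported when actually needed:
#
#   from transformers.models.glpn import GLPNConfig   # cheap, config only
#   from transformers.models.glpn import GLPNModel    # pulls in torch-backed code lazily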
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """
    Compute Gamma(num) for num > 0 by numerically integrating x^(num - 1) * e^(-x).

    >>> round(gamma(5), 4)
    24.0
    """
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
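# Hedged sanity checks: for positive integers Gamma(n) == (n - 1)!, and
# Gamma(1/2) == sqrt(pi).
#
#   gamma(5)    # ~24.0
#   gamma(0.5)  # ~1.7724538509055159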
| 73 |
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
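# Hedged illustration of token2json (the markup string below is made up):
#
#   seq = "<s_menu><s_name>Latte</s_name><s_price>5.00</s_price></s_menu>"
#   processor.token2json(seq, added_vocab=[])
#   # -> {"menu": {"name": "Latte", "price": "5.00"}}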
| 73 | 1 |
def to_upper_case(word: str) -> str:
    """
    Convert the entire string to ASCII uppercase without str.upper().

    >>> to_upper_case("wow")
    'WOW'
    >>> to_upper_case("Hello")
    'HELLO'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 257 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
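# Hedged usage sketch: this builder backs `load_dataset("pandas", ...)`, which
# reads pickled DataFrames into Arrow-backed splits.
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "frame.pkl"})
#   ds["train"].features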
| 257 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowercase_ : int = pd.read_csv('sample_data.csv', header=None)
lowercase_ : Any = df.shape[:1][0]
# If you're using some other dataset input the target column
lowercase_ : str = df.iloc[:, 1:2]
lowercase_ : List[Any] = actual_data.values.reshape(len_data, 1)
lowercase_ : Any = MinMaxScaler().fit_transform(actual_data)
lowercase_ : List[str] = 10
lowercase_ : Dict = 5
lowercase_ : List[str] = 20
lowercase_ : Any = len_data - periods * look_back
lowercase_ : Optional[int] = actual_data[:division]
lowercase_ : int = actual_data[division - look_back :]
lowercase_ , lowercase_ : Any = [], []
lowercase_ , lowercase_ : Optional[Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowercase_ : List[Any] = np.array(train_x)
lowercase_ : str = np.array(test_x)
lowercase_ : str = np.array([list(i.ravel()) for i in train_y])
lowercase_ : Dict = np.array([list(i.ravel()) for i in test_y])
lowercase_ : Dict = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
lowercase_ : Any = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
lowercase_ : Union[str, Any] = model.predict(x_test)
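    # Hedged follow-up sketch: predictions come back in the scaled [0, 1] space,
    # so a practical next step is to invert the MinMax scaling before comparing
    # against the raw series:
    #
    #   scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
    #   pred_rescaled = scaler.inverse_transform(pred.reshape(-1, 1))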
| 133 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
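# Hedged usage sketch for the two classes above:
#
#   config = RobertaConfig()            # defaults mirror roberta-base
#   config.num_hidden_layers            # 12
#   onnx_config = RobertaOnnxConfig(config)
#   onnx_config.inputs                  # OrderedDict of dynamic ONNX axes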
| 133 | 1 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 365 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18_536, 2_260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCamelCase :List[str] = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
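# Hedged standalone sketch of what these tests assert, outside the harness:
#
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   tok.decode(tok.encode("Hello World!"))  # round-trips through sentencepiece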
| 105 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
__a = ["""accelerate""", """launch"""]
__a = Path.home() / """.cache/huggingface/accelerate"""
__a = """default_config.yaml"""
__a = config_folder / config_file
__a = config_folder / """_default_config.yaml"""
__a = Path("""tests/test_configs""" )
@classmethod
def lowercase ( cls : int ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase ( cls : Dict ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase ( self : Union[str, Any] ):
_snake_case = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase ( self : Any ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__snake_case ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__snake_case ), self.test_file_path] , env=os.environ.copy() )
def lowercase ( self : str ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
__a = """test-tpu"""
__a = """us-central1-a"""
__a = """ls"""
__a = ["""accelerate""", """tpu-config"""]
__a = """cd /usr/share"""
__a = """tests/test_samples/test_command_file.sh"""
__a = """Running gcloud compute tpus tpu-vm ssh"""
def lowercase ( self : List[Any] ):
_snake_case = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def lowercase ( self : Union[str, Any] ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def lowercase ( self : str ):
_snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__snake_case )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : str ):
_snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def lowercase ( self : Optional[int] ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all''' , __snake_case , )
def lowercase ( self : Union[str, Any] ):
_snake_case = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : Tuple ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : int ):
_snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : Union[str, Any] ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
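# For reference, the CLI surface these tests drive (hedged, shell-level):
#
#   accelerate launch path/to/script.py
#   accelerate launch --config_file tests/test_configs/latest.yaml script.py
#   accelerate tpu-config --config_file ... --command "ls" --debug
#
# `--debug` makes `tpu-config` print the gcloud command instead of executing
# it, which is what lets the assertions above match on captured stdout.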
| 288 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
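# Hedged invocation sketch (script name and paths are placeholders):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned
#
# Without --not_finetuned, --dict_path is also required so the CTC vocabulary
# can be rebuilt from the fairseq dictionary.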
| 23 | 0 |
import inspect
import os

import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed

import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
    AccelerateTestCase,
    TempDirTestCase,
    execute_subprocess_async,
    require_cuda,
    require_fsdp,
    require_multi_gpu,
    slow,
)
from accelerate.utils.constants import (
    FSDP_AUTO_WRAP_POLICY,
    FSDP_BACKWARD_PREFETCH,
    FSDP_SHARDING_STRATEGY,
    FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment


set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]


@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__lowercase = self.dist_env.copy()
__lowercase = mp_dtype
with mockenv_context(**lowerCamelCase_ ):
__lowercase = Accelerator()
if mp_dtype == "fp16":
__lowercase = torch.floataa
elif mp_dtype == "bf16":
__lowercase = torch.bfloataa
__lowercase = MixedPrecision(param_dtype=lowerCamelCase_ ,reduce_dtype=lowerCamelCase_ ,buffer_dtype=lowerCamelCase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,lowerCamelCase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler ,lowerCamelCase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowerCamelCase_ )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__lowercase = self.dist_env.copy()
__lowercase = str(lowerCamelCase_ ).lower()
with mockenv_context(**lowerCamelCase_ ):
__lowercase = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=lowerCamelCase_ ) )
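# The tests above drive FullyShardedDataParallelPlugin purely through
# environment variables inside `mockenv_context`. A minimal sketch of the same
# idea using only the standard library (the FSDP_* variable names below are
# assumptions for illustration):
import os
from contextlib import contextmanager
from unittest import mock
@contextmanager
def mockenv(**env_vars):
    # Temporarily overlay env_vars on os.environ; the patch is undone on exit.
    with mock.patch.dict(os.environ, {k: str(v) for k, v in env_vars.items()}):
        yield
# Usage sketch:
# with mockenv(ACCELERATE_USE_FSDP="true", FSDP_OFFLOAD_PARAMS="true"):
#     plugin = FullyShardedDataParallelPlugin()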
@require_fsdp
@require_multi_gpu
@slow
class __lowercase ( __snake_case ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
__lowercase = 0.8_2
__lowercase = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
__lowercase = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
__lowercase = 160
__lowercase = 160
__lowercase = inspect.getfile(accelerate.test_utils )
__lowercase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = os.path.join(self.test_scripts_folder ,'''test_performance.py''' )
__lowercase = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
__lowercase = cmd.copy()
for i, strategy in enumerate(lowerCamelCase_ ):
if strategy.lower() in config:
cmd_config.append(f"--fsdp_sharding_strategy={i+1}" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--performance_lower_bound={self.performance_lower_bound}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' )
__lowercase = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(lowerCamelCase_ ):
__lowercase = cmd.copy()
cmd_config.append(f"--fsdp_sharding_strategy={i+1}" )
if strategy != "FULL_SHARD":
continue
__lowercase = len(lowerCamelCase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__lowercase = cmd_config[:state_dict_config_index]
cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}" )
cmd_config.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
__lowercase = cmd_config[:-1]
__lowercase = os.path.join(self.tmpdir ,'''epoch_0''' )
cmd_config.extend(
[
f"--resume_from_checkpoint={resume_from_checkpoint}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' )
__lowercase = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__lowercase = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(lowerCamelCase_ ):
if strategy.lower() in spec:
cmd_config.append(f"--fsdp_sharding_strategy={i+1}" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--peak_memory_upper_bound={peak_mem_upper_bound}",
f"--n_train={self.n_train}",
f"--n_val={self.n_val}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
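# Each test above assembles an `accelerate launch` argument list and runs it in
# a subprocess. A stripped-down sketch of the same pattern with the standard
# library (flag values are illustrative, mirroring the commands built above):
import os
import subprocess
def run_fsdp_script(script_path, output_dir, sharding_strategy=1):
    cmd = [
        "accelerate", "launch",
        "--num_processes=2", "--num_machines=1", "--machine_rank=0",
        "--use_fsdp", f"--fsdp_sharding_strategy={sharding_strategy}",
        script_path, f"--output_dir={output_dir}",
    ]
    env = {**os.environ, "OMP_NUM_THREADS": "1"}  # mirrors patch_environment above
    subprocess.run(cmd, env=env, check=True)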
| 365 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : List[Any] = CanineTokenizer
a : Union[str, Any] = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().setUp()
__lowercase = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> CanineTokenizer:
'''simple docstring'''
__lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
__lowercase = 1024
return tokenizer
@require_torch
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.canine_tokenizer
__lowercase = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
__lowercase = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
__lowercase = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors='''pt''' )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual((2, 39) ,batch.input_ids.shape )
self.assertEqual((2, 39) ,batch.attention_mask.shape )
@require_torch
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.canine_tokenizer
        __lowercase = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
__lowercase = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' ,_lowerCamelCase )
self.assertIn('''attention_mask''' ,_lowerCamelCase )
self.assertIn('''token_type_ids''' ,_lowerCamelCase )
@require_torch
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.canine_tokenizer
__lowercase = [
            '''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
__lowercase = tokenizer(
text_target=_lowerCamelCase ,max_length=32 ,padding='''max_length''' ,truncation=_lowerCamelCase ,return_tensors='''pt''' )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowercase = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
__lowercase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__lowercase = chr(0xe_0_0_7 )
additional_special_tokens.append(_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowercase = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertIn(_lowerCamelCase ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase , __lowercase = self.get_clean_sequence(_lowerCamelCase )
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_5
__lowercase = chr(_lowerCamelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,1 )
__lowercase = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,input_encoded + special_token_id )
__lowercase = tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = chr(0xe_0_0_5 )
__lowercase = chr(0xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=_lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
__lowercase = tokenizer.tokenize(_lowerCamelCase )
__lowercase = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,1 )
self.assertEqual(len(_lowerCamelCase ) ,1 )
self.assertEqual(token_a[0] ,_lowerCamelCase )
self.assertEqual(token_a[0] ,_lowerCamelCase )
@require_tokenizers
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
__lowercase = AddedToken(_lowerCamelCase ,lstrip=_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCamelCase )
tokenizer.from_pretrained(_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''special_tokens_map.json''' ) ,encoding='''utf-8''' ) as json_file:
__lowercase = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''tokenizer_config.json''' ) ,encoding='''utf-8''' ) as json_file:
__lowercase = json.load(_lowerCamelCase )
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
__lowercase = [new_token_a]
__lowercase = [new_token_a]
with open(os.path.join(_lowerCamelCase ,'''special_tokens_map.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''tokenizer_config.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowercase = tokenizer_class.from_pretrained(_lowerCamelCase ,extra_ids=0 )
self.assertIn(_lowerCamelCase ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
__lowercase = 0xe_0_0_7
__lowercase = chr(_lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowercase = [AddedToken(_lowerCamelCase ,lstrip=_lowerCamelCase )]
__lowercase = tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,extra_ids=0 )
self.assertIn(_lowerCamelCase ,tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = '''hello world'''
if self.space_between_special_tokens:
__lowercase = '''[CLS] hello world [SEP]'''
else:
__lowercase = input
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.decode(_lowerCamelCase ,spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCamelCase ,[output, output.lower()] )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__lowercase = '''a'''
__lowercase = ord(_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase ,attr + '''_id''' ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + '''_id''' ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,attr + '''_id''' ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + '''_id''' ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens''' ) ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[] )
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
setattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ,[additional_special_token_id] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens''' ) ,[additional_special_token] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[additional_special_token_id] )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
pass
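# CANINE needs no subword vocabulary: every Unicode character is its own token
# id, and special tokens live in the private-use area (0xE000 and 0xE001
# bracket the sequence in the expected ids of the batch test above). A hedged
# sketch of that encoding, ignoring padding and attention masks:
def encode_codepoints_sketch(text, cls_id=0xE000, sep_id=0xE001):
    return [cls_id] + [ord(char) for char in text] + [sep_id]
# encode_codepoints_sketch("Life") == [57344, 76, 105, 102, 101, 57345]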
| 217 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=_UpperCamelCase ):
lowerCAmelCase : str = ['note_seq']
def __init__( self : Tuple ,*_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : str ):
requires_backends(self ,['note_seq'] )
@classmethod
def __lowercase ( cls : List[Any] ,*_UpperCAmelCase : str ,**_UpperCAmelCase : Optional[Any] ):
requires_backends(cls ,['note_seq'] )
@classmethod
def __lowercase ( cls : Union[str, Any] ,*_UpperCAmelCase : Dict ,**_UpperCAmelCase : Any ):
requires_backends(cls ,['note_seq'] )
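# The dummy class above is what gets imported when `note_seq` is missing: any
# attempt to instantiate it (or to call a classmethod such as from_pretrained)
# fails with an informative error instead of an AttributeError. A simplified
# sketch of the guard; the real `requires_backends` also formats install
# hints per backend:
import importlib.util
def requires_backends_sketch(obj, backends):
    name = obj if isinstance(obj, str) else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")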
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : List[str] = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
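# `_LazyModule` above defers the heavy torch/tf imports until an attribute is
# first accessed. The core idea can be sketched with PEP 562's module-level
# `__getattr__` (simplified; the real class also wires up `dir()` and
# submodule access):
import importlib
_sketch_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}
def __getattr__(name):
    if name in _sketch_attr_to_module:
        submodule = importlib.import_module("." + _sketch_attr_to_module[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")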
| 88 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Build the OneFormer metadata dict (class names and "thing" ids) from a
    class-info JSON file hosted on the Hub."""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class a ( unittest.TestCase ):
def __init__( self :Dict ,__lowercase :List[Any] ,__lowercase :str=7 ,__lowercase :Any=3 ,__lowercase :Union[str, Any]=3_0 ,__lowercase :Optional[int]=4_0_0 ,__lowercase :int=None ,__lowercase :Dict=True ,__lowercase :Dict=True ,__lowercase :Optional[Any]=[0.5, 0.5, 0.5] ,__lowercase :List[str]=[0.5, 0.5, 0.5] ,__lowercase :Dict=1_0 ,__lowercase :List[str]=False ,__lowercase :Tuple=2_5_5 ,__lowercase :Any="shi-labs/oneformer_demo" ,__lowercase :str="ade20k_panoptic.json" ,__lowercase :str=1_0 ,):
snake_case__ : Dict = parent
snake_case__ : Dict = batch_size
snake_case__ : str = num_channels
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : Optional[Any] = max_resolution
snake_case__ : str = do_resize
snake_case__ : Any = {'''shortest_edge''': 3_2, '''longest_edge''': 1_3_3_3} if size is None else size
snake_case__ : int = do_normalize
snake_case__ : str = image_mean
snake_case__ : List[Any] = image_std
snake_case__ : str = class_info_file
snake_case__ : Union[str, Any] = prepare_metadata(__lowercase ,__lowercase )
snake_case__ : List[Any] = num_text
snake_case__ : Any = repo_path
# for the post_process_functions
snake_case__ : List[str] = 2
snake_case__ : Tuple = 1_0
snake_case__ : List[str] = 1_0
snake_case__ : Tuple = 3
snake_case__ : int = 4
snake_case__ : Optional[int] = num_labels
snake_case__ : Optional[Any] = do_reduce_labels
snake_case__ : Dict = ignore_index
def __lowerCamelCase ( self :Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __lowerCamelCase ( self :Tuple ,__lowercase :int ,__lowercase :List[Any]=False ):
if not batched:
snake_case__ : Any = image_inputs[0]
if isinstance(__lowercase ,Image.Image ):
snake_case__ , snake_case__ : Optional[Any] = image.size
else:
snake_case__ , snake_case__ : Optional[Any] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
snake_case__ : Union[str, Any] = self.size['''shortest_edge''']
elif w > h:
snake_case__ : Optional[Any] = self.size['''shortest_edge''']
snake_case__ : Optional[Any] = int(self.size['''shortest_edge'''] * w / h )
else:
snake_case__ : Tuple = self.size['''shortest_edge''']
snake_case__ : Optional[int] = self.size['''shortest_edge''']
else:
snake_case__ : Union[str, Any] = []
for image in image_inputs:
snake_case__ , snake_case__ : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Optional[int] = max(__lowercase ,key=lambda __lowercase : item[0] )[0]
snake_case__ : Union[str, Any] = max(__lowercase ,key=lambda __lowercase : item[1] )[1]
return expected_height, expected_width
def __lowerCamelCase ( self :Any ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) ,masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) ,)
@require_torch
@require_vision
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : str = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__lowerCAmelCase : List[Any] = image_processing_class
def __lowerCamelCase ( self :Any ):
snake_case__ : Dict = OneFormerImageProcessorTester(self )
@property
def __lowerCamelCase ( self :Optional[Any] ):
return self.image_processing_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self :Tuple ):
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase ,'''image_mean''' ) )
self.assertTrue(hasattr(__lowercase ,'''image_std''' ) )
self.assertTrue(hasattr(__lowercase ,'''do_normalize''' ) )
self.assertTrue(hasattr(__lowercase ,'''do_resize''' ) )
self.assertTrue(hasattr(__lowercase ,'''size''' ) )
self.assertTrue(hasattr(__lowercase ,'''ignore_index''' ) )
self.assertTrue(hasattr(__lowercase ,'''class_info_file''' ) )
self.assertTrue(hasattr(__lowercase ,'''num_text''' ) )
self.assertTrue(hasattr(__lowercase ,'''repo_path''' ) )
self.assertTrue(hasattr(__lowercase ,'''metadata''' ) )
self.assertTrue(hasattr(__lowercase ,'''do_reduce_labels''' ) )
def __lowerCamelCase ( self :Any ):
pass
def __lowerCamelCase ( self :List[Any] ):
# Initialize image_processor
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Any = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processor(image_inputs[0] ,['''semantic'''] ,return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ : str = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,)
# Test batched
snake_case__ , snake_case__ : Dict = self.image_processing_tester.get_expected_values(__lowercase ,batched=__lowercase )
snake_case__ : Dict = image_processor(
__lowercase ,['''semantic'''] * len(__lowercase ) ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCamelCase ( self :Dict ):
# Initialize image_processor
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Dict = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__lowercase ,numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,np.ndarray )
# Test not batched input
snake_case__ : Any = image_processor(image_inputs[0] ,['''semantic'''] ,return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ : Any = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,)
# Test batched
snake_case__ , snake_case__ : Any = self.image_processing_tester.get_expected_values(__lowercase ,batched=__lowercase )
snake_case__ : List[Any] = image_processor(
__lowercase ,['''semantic'''] * len(__lowercase ) ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCamelCase ( self :str ):
# Initialize image_processor
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__lowercase ,torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,torch.Tensor )
# Test not batched input
snake_case__ : Optional[int] = image_processor(image_inputs[0] ,['''semantic'''] ,return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ : Optional[Any] = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,)
# Test batched
snake_case__ , snake_case__ : Any = self.image_processing_tester.get_expected_values(__lowercase ,batched=__lowercase )
snake_case__ : Tuple = image_processor(
__lowercase ,['''semantic'''] * len(__lowercase ) ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCamelCase ( self :Dict ,__lowercase :Optional[Any]=False ,__lowercase :str=False ,__lowercase :List[Any]="np" ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
snake_case__ : Any = self.image_processing_tester.num_labels
snake_case__ : List[str] = None
snake_case__ : int = None
snake_case__ : List[Any] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__lowercase )
if with_segmentation_maps:
snake_case__ : Tuple = num_labels
if is_instance_map:
snake_case__ : Optional[int] = list(range(__lowercase ) ) * 2
snake_case__ : Dict = dict(enumerate(__lowercase ) )
snake_case__ : Optional[Any] = [
np.random.randint(0 ,high * 2 ,(img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
snake_case__ : Tuple = [Image.fromarray(__lowercase ) for annotation in annotations]
snake_case__ : str = image_processor(
__lowercase ,['''semantic'''] * len(__lowercase ) ,__lowercase ,return_tensors='''pt''' ,instance_id_to_semantic_id=__lowercase ,pad_and_return_pixel_mask=__lowercase ,)
return inputs
def __lowerCamelCase ( self :Any ):
pass
def __lowerCamelCase ( self :int ):
def common(__lowercase :int=False ,__lowercase :Tuple=None ):
snake_case__ : List[Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=__lowercase ,is_instance_map=__lowercase ,segmentation_type=__lowercase )
snake_case__ : List[str] = inputs['''mask_labels''']
snake_case__ : Tuple = inputs['''class_labels''']
snake_case__ : Any = inputs['''pixel_values''']
snake_case__ : str = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(__lowercase ,__lowercase ,__lowercase ):
self.assertEqual(mask_label.shape[0] ,class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] ,pixel_values.shape[2:] )
self.assertEqual(len(__lowercase ) ,self.image_processing_tester.num_text )
common()
common(is_instance_map=__lowercase )
common(is_instance_map=__lowercase ,segmentation_type='''pil''' )
common(is_instance_map=__lowercase ,segmentation_type='''pil''' )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[Any] = np.zeros((2_0, 5_0) )
snake_case__ : List[str] = 1
snake_case__ : Any = 1
snake_case__ : int = 1
snake_case__ : Dict = binary_mask_to_rle(__lowercase )
self.assertEqual(len(__lowercase ) ,4 )
self.assertEqual(rle[0] ,2_1 )
self.assertEqual(rle[1] ,4_5 )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes ,max_seq_length=7_7 ,task_seq_length=7_7 ,class_info_file='''ade20k_panoptic.json''' ,num_text=self.image_processing_tester.num_text ,repo_path='''shi-labs/oneformer_demo''' ,)
snake_case__ : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
        snake_case__ : Optional[int] = feature_extractor.post_process_semantic_segmentation(__lowercase )
self.assertEqual(len(__lowercase ) ,self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape ,(
self.image_processing_tester.height,
self.image_processing_tester.width,
) ,)
snake_case__ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        snake_case__ : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(__lowercase ,target_sizes=__lowercase )
self.assertEqual(segmentation[0].shape ,target_sizes[0] )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes ,max_seq_length=7_7 ,task_seq_length=7_7 ,class_info_file='''ade20k_panoptic.json''' ,num_text=self.image_processing_tester.num_text ,repo_path='''shi-labs/oneformer_demo''' ,)
snake_case__ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case__ : List[str] = image_processor.post_process_instance_segmentation(__lowercase ,threshold=0 )
self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) ,__lowercase )
self.assertEqual(
el['''segmentation'''].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes ,max_seq_length=7_7 ,task_seq_length=7_7 ,class_info_file='''ade20k_panoptic.json''' ,num_text=self.image_processing_tester.num_text ,repo_path='''shi-labs/oneformer_demo''' ,)
snake_case__ : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case__ : Optional[int] = image_processor.post_process_panoptic_segmentation(__lowercase ,threshold=0 )
self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) ,__lowercase )
self.assertEqual(
el['''segmentation'''].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) )
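# `binary_mask_to_rle` (exercised in the test above) encodes a flattened
# binary mask as alternating (1-based start, run length) values for each run
# of ones -- hence a run of 45 ones beginning at flat position 21 yields
# rle[0] == 21 and rle[1] == 45, as asserted above. A self-contained sketch:
def binary_mask_to_rle_sketch(mask):
    pixels = np.asarray(mask).flatten()
    pixels = np.concatenate([[0], pixels, [0]])          # zero sentinels at both ends
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1    # 1-based change points
    runs[1::2] -= runs[::2]                              # second of each pair -> run length
    return list(runs)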
| 44 |
def solution(length: int = 50) -> int:
    """Project Euler 114: count the ways a row of `length` units can be filled
    with red blocks of length >= 3, any two blocks separated by at least one
    grey square. The all-grey row counts as one arrangement."""
    ways_number = [1] * (length + 1)  # ways_number[0] .. ways_number[length]; 1 = empty row
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # Place a block at `block_start`, keep one grey separator after it,
            # then fill the remaining suffix in every possible way.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The block can also sit flush against the right end (no separator).
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
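    # Sanity check from the Project Euler 114 statement: a row of length
    # seven admits exactly seventeen arrangements.
    assert solution(7) == 17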
| 44 | 1 |
def solution(limit: int = 50_000_000) -> int:
    """Project Euler 87: count the numbers below `limit` expressible as the
    sum of a prime square, a prime cube and a prime fourth power."""
    ret = set()
    # The smallest possible cube + fourth-power contribution is
    # 2**3 + 2**4 = 24, so the squared prime never exceeds sqrt(limit - 24).
    prime_square_limit = int((limit - 24) ** (1 / 2))
    # Sieve of Eratosthenes over the odd numbers, with 2 added explicitly.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    primes = sorted(primes)  # sorted, so the early `break`s below are valid
    for prime_a in primes:
        square = prime_a * prime_a
        for prime_b in primes:
            cube = prime_b * prime_b * prime_b
            # The smallest fourth power is 2**4 = 16.
            if square + cube >= limit - 16:
                break
            for prime_c in primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
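    # From the problem statement: below fifty there are exactly four such
    # numbers: 28 = 2**2 + 2**3 + 2**4, 33 = 3**2 + 2**3 + 2**4,
    # 47 = 2**2 + 3**3 + 2**4 and 49 = 5**2 + 2**3 + 2**4.
    assert solution(50) == 4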
| 257 |
from math import factorial
def solution(num: int = 100) -> int:
    """Project Euler 20: sum of the digits of num! (default 100!)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 257 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'openai-gpt'
lowerCAmelCase__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , lowercase=40478 , lowercase=512 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.0_2 , lowercase="cls_index" , lowercase=True , lowercase=None , lowercase=True , lowercase=0.1 , **lowercase , ) -> List[Any]:
lowerCamelCase_ = vocab_size
lowerCamelCase_ = n_positions
lowerCamelCase_ = n_embd
lowerCamelCase_ = n_layer
lowerCamelCase_ = n_head
lowerCamelCase_ = afn
lowerCamelCase_ = resid_pdrop
lowerCamelCase_ = embd_pdrop
lowerCamelCase_ = attn_pdrop
lowerCamelCase_ = layer_norm_epsilon
lowerCamelCase_ = initializer_range
lowerCamelCase_ = summary_type
lowerCamelCase_ = summary_use_proj
lowerCamelCase_ = summary_activation
lowerCamelCase_ = summary_first_dropout
lowerCamelCase_ = summary_proj_to_labels
super().__init__(**lowercase )
| 47 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowercase = 16 , lowercase = 88 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = 32 , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = "geglu" , lowercase = None , ) -> Any:
super().__init__()
lowerCamelCase_ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase_ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase_ = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase_ = [1, 0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase = True , ) -> int:
lowerCamelCase_ = hidden_states
lowerCamelCase_ = []
lowerCamelCase_ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase_ = self.transformer_index_for_condition[i]
lowerCamelCase_ = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase_ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase )
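# The forward pass above runs two transformers on disjoint slices of the
# condition tokens and blends their residuals with `mix_ratio`. Stripped of
# the model calls, the blend is just a convex combination of the two deltas
# (sketch; works on any tensor-like operands):
def mix_residuals(input_states, encoded_a, encoded_b, mix_ratio=0.5):
    delta_a = encoded_a - input_states                  # first transformer's contribution
    delta_b = encoded_b - input_states                  # second transformer's contribution
    return input_states + mix_ratio * delta_a + (1 - mix_ratio) * delta_b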
| 47 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    """Return a unique file path (with the given suffix) inside a fresh temp directory."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class a ( unittest.TestCase ):
def A_ ( self : int ):
snake_case_ = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ = AgentAudio(lowercase_ )
snake_case_ = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowercase_ ) )
# Ensure that the file contains the same value as the original tensor
snake_case_ ,snake_case_ = sf.read(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_ ) , atol=1e-4 ) )
def A_ ( self : Optional[int] ):
snake_case_ = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ = get_new_path(suffix='''.wav''' )
sf.write(lowercase_ , lowercase_ , 1_6000 )
snake_case_ = AgentAudio(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , lowercase_ )
@require_vision
@require_torch
class a ( unittest.TestCase ):
def A_ ( self : List[Any] ):
snake_case_ = torch.randint(0 , 256 , (64, 64, 3) )
snake_case_ = AgentImage(lowercase_ )
snake_case_ = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def A_ ( self : Tuple ):
snake_case_ = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ = Image.open(lowercase_ )
snake_case_ = AgentImage(lowercase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def A_ ( self : Dict ):
snake_case_ = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ = Image.open(lowercase_ )
snake_case_ = AgentImage(lowercase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
class a ( unittest.TestCase ):
def A_ ( self : int ):
snake_case_ = '''Hey!'''
snake_case_ = AgentText(lowercase_ )
self.assertEqual(lowercase_ , agent_type.to_string() )
self.assertEqual(lowercase_ , agent_type.to_raw() )
self.assertEqual(lowercase_ , lowercase_ )
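# AgentAudio (tested above) keeps the raw tensor in memory and only writes a
# .wav file the first time a string path is requested. A pared-down sketch of
# that lazy serialization; the 16 kHz sample rate and torch tensor input
# mirror the tests above:
class LazyAudioSketch:
    def __init__(self, tensor, samplerate=16_000):
        self._tensor = tensor
        self._samplerate = samplerate
        self._path = None
    def to_raw(self):
        return self._tensor
    def to_string(self):
        if self._path is None:                          # serialize on first access only
            self._path = get_new_path(suffix=".wav")
            sf.write(self._path, self._tensor.numpy(), self._samplerate)
        return self._path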
| 56 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER
lowerCamelCase : str =True
lowerCamelCase : Union[str, Any] ="""ml.p3.2xlarge"""
lowerCamelCase : str ="""accelerate_sagemaker_execution_role"""
lowerCamelCase : int ="""hf-sm"""
lowerCamelCase : int ="""us-east-1"""
lowerCamelCase : Tuple =1
lowerCamelCase : Any ="""accelerate-sagemaker-1"""
lowerCamelCase : str ="""1.6"""
lowerCamelCase : Tuple ="""4.4"""
lowerCamelCase : Optional[int] ="""train.py"""
lowerCamelCase : Optional[Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
lowerCamelCase : Union[str, Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[str]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
a : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , lowerCAmelCase__ )
assert isinstance(converted_args["do_train"] , lowerCAmelCase__ )
assert isinstance(converted_args["epochs"] , lowerCAmelCase__ )
assert isinstance(converted_args["learning_rate"] , lowerCAmelCase__ )
assert isinstance(converted_args["max_steps"] , lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
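# `_convert_nargs_to_dict` (exercised above) pairs up flag/value tokens and
# coerces the string values into bool/int/float where possible. A hedged
# sketch of that coercion logic; the real helper additionally validates the
# pattern and raises on malformed input, as the `fail` case above asserts:
def convert_nargs_to_dict_sketch(args):
    result, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        # A flag followed by another flag (or nothing) is a bare boolean.
        if i + 1 >= len(args) or args[i + 1].startswith("--"):
            result[key], i = True, i + 1
            continue
        value = args[i + 1]
        if value in ("True", "False"):
            value = value == "True"
        else:
            try:
                value = int(value)
            except ValueError:
                try:
                    value = float(value)
                except ValueError:
                    pass
        result[key] = value
        i += 2
    return result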
| 105 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 352 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']
    def __init__( self , fpn_feature_size: int = 256 , mask_feature_size: int = 256 , no_object_weight: float = 0.1 , use_auxiliary_loss: bool = False , backbone_config: Optional[Dict] = None , decoder_config: Optional[Dict] = None , init_std: float = 0.02 , init_xavier_std: float = 1.0 , dice_weight: float = 1.0 , cross_entropy_weight: float = 1.0 , mask_weight: float = 20.0 , output_auxiliary_logits: Optional[bool] = None , **kwargs , ) -> None:
        """simple docstring"""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {','.join(self.backbones_supported )}""" )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('''model_type''' ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {','.join(self.decoders_supported )}""" )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ) -> "MaskFormerConfig":
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 116 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "" ) -> dict[str, float]:
    '''simple docstring'''
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    titles = soup.find_all("td" , attrs="titleColumn" )
    ratings = soup.find_all("td" , class_="ratingColumn imdbRating" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv" ) -> None:
    '''simple docstring'''
    movies = get_imdb_top_250_movies()
    with open(filename , "w" , newline="" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["Movie title", "IMDb rating"] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
    write_movies()
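# Illustrative output shape (an assumption, not captured from a live run): the CSV
# written above has a header row followed by one "title,rating" row per film, e.g.
#   Movie title,IMDb rating
#   The Shawshank Redemption,9.3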
| 54 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy( self ) -> "DownloadConfig":
        '''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
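# Minimal usage sketch (not part of the original module): `copy` deep-copies every
# field, so mutating the clone leaves the source config untouched.
if __name__ == "__main__":
    _cfg = DownloadConfig(cache_dir="/tmp/datasets_cache", num_proc=4)
    _clone = _cfg.copy()
    _clone.num_proc = 8
    assert _cfg.num_proc == 4 and _clone.cache_dir == _cfg.cache_dir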
| 217 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
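# Illustration (added; not part of this file): with the `_LazyModule` pattern above,
#     from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig
# resolves the configuration submodule on first attribute access rather than at
# package import time, keeping the top-level `import transformers` cheap.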
| 369 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """simple docstring"""
    def put( self , value ):
        raise NotImplementedError()
    def end( self ):
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """simple docstring"""
    def __init__( self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , **decode_kwargs ):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put( self , value ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text( self , text: str , stream_end: bool = False ):
        print(text , flush=True , end="""""" if not stream_end else None )
    def _is_chinese_char( self , cp ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    """simple docstring"""
    def __init__( self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , timeout: Optional[float] = None , **decode_kwargs ):
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text( self , text: str , stream_end: bool = False ):
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
    def __iter__( self ):
        return self
    def __next__( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
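# Hedged usage sketch (not part of the original module; "gpt2" is an illustrative
# checkpoint): the streamer is handed to `generate`, which calls `put` for each new
# token and `end` when generation finishes, printing text as it is produced.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer
    _tok = AutoTokenizer.from_pretrained("gpt2")
    _model = AutoModelForCausalLM.from_pretrained("gpt2")
    _inputs = _tok(["A short story:"], return_tensors="pt")
    _streamer = TextStreamer(_tok, skip_prompt=True)
    _model.generate(**_inputs, streamer=_streamer, max_new_tokens=20)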
| 43 | 0 |
"""simple docstring"""
def stooge_sort(arr: list ) -> list:
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr: list , i: int , h: int ) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = int((h - i + 1) / 3 )
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , (h) )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , (h - t) )
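# Worked example (added for illustration): stooge sort is correct but slow, with
# time complexity O(n**(log 3 / log 1.5)) ~ O(n**2.71).
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]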
if __name__ == "__main__":
_a : List[str] = input('Enter numbers separated by a comma:\n').strip()
_a : List[Any] = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
| 44 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs
    def __init__( self , repo_info: Optional[DatasetInfo] = None , token: Optional[str] = None , **kwargs , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                self.dir_cache.update(
                    {
                        str(d ): {"""name""": str(d ), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path: str , mode: str = "rb" , **kwargs , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/""" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/""" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out )
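# Hedged usage sketch (assumes a `DatasetInfo` whose `siblings` list is populated;
# not part of the original module):
# fs = HfFileSystem(repo_info=dataset_info, token=None)
# fs.ls("", detail=False)                 # top-level files and directories of the repo
# with fs.open("data/train.csv") as f:    # streams the file over HTTP
#     header = f.readline()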
| 44 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A ( self : Optional[int] ):
A_ , A_ = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
A_ = "A painting of a squirrel eating a burger"
A_ = jax.device_count()
A_ = num_samples * [prompt]
A_ = sd_pipe.prepare_inputs(__lowerCAmelCase )
A_ = replicate(__lowerCAmelCase )
A_ = shard(__lowerCAmelCase )
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(__lowerCAmelCase , jax.device_count() )
A_ = sd_pipe(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_inference_steps=25 , jit=__lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ = images[0, 253:256, 253:256, -1]
A_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __A ( self : str ):
A_ = "stabilityai/stable-diffusion-2"
A_ , A_ = FlaxDPMSolverMultistepScheduler.from_pretrained(__lowerCAmelCase , subfolder="scheduler" )
A_ , A_ = FlaxStableDiffusionPipeline.from_pretrained(
__lowerCAmelCase , scheduler=__lowerCAmelCase , revision="bf16" , dtype=jnp.bfloataa , )
A_ = scheduler_params
A_ = "A painting of a squirrel eating a burger"
A_ = jax.device_count()
A_ = num_samples * [prompt]
A_ = sd_pipe.prepare_inputs(__lowerCAmelCase )
A_ = replicate(__lowerCAmelCase )
A_ = shard(__lowerCAmelCase )
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(__lowerCAmelCase , jax.device_count() )
A_ = sd_pipe(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_inference_steps=25 , jit=__lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ = images[0, 253:256, 253:256, -1]
A_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo: str , pytorch_dump_folder_path: str ):
    """simple docstring"""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=["RobertaPreLayerNormForMaskedLM"] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename="pytorch_model.bin" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta." ):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
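# Hedged command-line sketch (the script filename is a placeholder; the checkpoint
# repo comes from the help text below):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm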
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
'''simple docstring'''
import math
def fx(x: float , a: float ) -> float:
    """simple docstring"""
    return math.pow(x , 2 ) - a
def fx_derivative(x: float ) -> float:
    """simple docstring"""
    return 2 * x
def get_initial_point(a: float ) -> float:
    """simple docstring"""
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative(a: float , max_iter: int = 99_99 , tolerance: float = 0.00_00_00_00_00_00_01 ) -> float:
    """simple docstring"""
    if a < 0:
        raise ValueError('math domain error' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
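# Worked example (added for illustration): Newton's method x_{n+1} = x_n - f(x_n)/f'(x_n)
# applied to f(x) = x**2 - a converges quadratically to sqrt(a).
assert abs(square_root_iterative(4) - 2.0) < 1e-9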
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , label: str ):
        '''simple docstring'''
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='pt' )
    def forward( self , inputs ):
        '''simple docstring'''
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        '''simple docstring'''
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
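# Hedged usage sketch (requires torch plus the vision extras; "cat.png" is a
# placeholder path, not from the original file):
# from PIL import Image
# tool = ImageSegmentationTool()                      # lazily loads CIDAS/clipseg-rd64-refined
# mask = tool(image=Image.open("cat.png"), label="cat")
# mask.save("cat_mask.png")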
| 47 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( a ):
def __init__( self : List[Any] , _snake_case : NestedDataStructureLike[PathLike] , _snake_case : Optional[NamedSplit] = None , _snake_case : Optional[Features] = None , _snake_case : str = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : Optional[str] = None , _snake_case : Optional[int] = None , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
_snake_case , split=_snake_case , features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , streaming=_snake_case , num_proc=_snake_case , **_snake_case , )
UpperCAmelCase_ = field
UpperCAmelCase_ = path_or_paths if isinstance(_snake_case , _snake_case) else {self.split: path_or_paths}
UpperCAmelCase_ = Json(
cache_dir=_snake_case , data_files=_snake_case , features=_snake_case , field=_snake_case , **_snake_case , )
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
if self.streaming:
UpperCAmelCase_ = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
self.builder.download_and_prepare(
download_config=_snake_case , download_mode=_snake_case , verification_mode=_snake_case , base_path=_snake_case , num_proc=self.num_proc , )
UpperCAmelCase_ = self.builder.as_dataset(
split=self.split , verification_mode=_snake_case , in_memory=self.keep_in_memory)
return dataset
class __snake_case :
def __init__( self : Dict , _snake_case : Dataset , _snake_case : Union[PathLike, BinaryIO] , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , **_snake_case : List[Any] , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""")
UpperCAmelCase_ = dataset
UpperCAmelCase_ = path_or_buf
UpperCAmelCase_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ = num_proc
UpperCAmelCase_ = '''utf-8'''
UpperCAmelCase_ = to_json_kwargs
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.to_json_kwargs.pop('''path_or_buf''' , _snake_case)
UpperCAmelCase_ = self.to_json_kwargs.pop('''orient''' , '''records''')
UpperCAmelCase_ = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False)
UpperCAmelCase_ = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True)
UpperCAmelCase_ = self.to_json_kwargs.pop('''compression''' , _snake_case)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""")
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_snake_case) as buffer:
UpperCAmelCase_ = self._write(file_obj=_snake_case , orient=_snake_case , lines=_snake_case , index=_snake_case , **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''')
UpperCAmelCase_ = self._write(
file_obj=self.path_or_buf , orient=_snake_case , lines=_snake_case , index=_snake_case , **self.to_json_kwargs)
return written
def lowerCamelCase ( self : Any , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = args
UpperCAmelCase_ = query_table(
table=self.dataset.data , key=slice(_snake_case , offset + self.batch_size) , indices=self.dataset._indices , )
UpperCAmelCase_ = batch.to_pandas().to_json(
path_or_buf=_snake_case , orient=_snake_case , lines=_snake_case , index=_snake_case , **_snake_case)
if not json_str.endswith('''\n'''):
json_str += "\n"
return json_str.encode(self.encoding)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : BinaryIO , _snake_case : Any , _snake_case : str , _snake_case : Any , **_snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
UpperCAmelCase_ = self._batch_json((offset, orient, lines, index, to_json_kwargs))
written += file_obj.write(_snake_case)
else:
UpperCAmelCase_ , UpperCAmelCase_ = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _snake_case , _snake_case)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_snake_case)
return written
| 7 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline):
    '''simple docstring'''
    def __init__( self , **kwargs) -> None:
        super().__init__(**kwargs)
        requires_backends(self , '''vision''')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__( self , images , **kwargs) -> Union[str, Any]:
        return super().__call__(images , **kwargs)
    def _sanitize_parameters( self , **kwargs) -> tuple:
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}.") -> dict:
        image = load_image(image)
        inputs = self.image_processor(images=[image] , return_tensors=self.framework)
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True)
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self , model_inputs) -> dict:
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0] , UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs)
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs) -> list:
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores , list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels) , key=lambda x: -x[0])
        ]
        return result
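# Hedged usage sketch (the checkpoint name is an assumption; scores are illustrative):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"])
# # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]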
| 43 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    '''simple docstring'''
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    '''simple docstring'''
@register_to_config
def __init__( self, lowerCamelCase__ = 32, lowerCamelCase__ = 64, lowerCamelCase__ = 20, lowerCamelCase__ = 768, lowerCamelCase__=77, lowerCamelCase__=4, lowerCamelCase__ = 0.0, lowerCamelCase__ = "silu", lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "linear", lowerCamelCase__ = "prd", lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
super().__init__()
A : int = num_attention_heads
A : Tuple = attention_head_dim
A : Optional[int] = num_attention_heads * attention_head_dim
A : List[str] = additional_embeddings
A : int = time_embed_dim or inner_dim
A : Tuple = embedding_proj_dim or embedding_dim
A : Dict = clip_embed_dim or embedding_dim
A : List[str] = Timesteps(lowerCamelCase__, lowerCamelCase__, 0 )
A : Any = TimestepEmbedding(lowerCamelCase__, lowerCamelCase__, out_dim=lowerCamelCase__, act_fn=lowerCamelCase__ )
A : List[Any] = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
if embedding_proj_norm_type is None:
A : Union[str, Any] = None
elif embedding_proj_norm_type == "layer":
A : str = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
A : Optional[Any] = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
if encoder_hid_proj_type is None:
A : Dict = None
elif encoder_hid_proj_type == "linear":
A : Union[str, Any] = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
A : List[str] = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCamelCase__ ) )
if added_emb_type == "prd":
A : Union[str, Any] = nn.Parameter(torch.zeros(1, 1, lowerCamelCase__ ) )
elif added_emb_type is None:
A : int = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
A : int = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, dropout=lowerCamelCase__, activation_fn="""gelu""", attention_bias=lowerCamelCase__, )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
A : int = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
A : Tuple = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
A : Optional[Any] = nn.LayerNorm(lowerCamelCase__ )
A : str = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
A : Union[str, Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
causal_attention_mask.triu_(1 )
A : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""", lowerCamelCase__, persistent=lowerCamelCase__ )
A : Dict = nn.Parameter(torch.zeros(1, lowerCamelCase__ ) )
A : Tuple = nn.Parameter(torch.zeros(1, lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _lowerCAmelCase ( self ):
A : Optional[int] = {}
def fn_recursive_add_processors(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if hasattr(lowerCamelCase__, """set_processor""" ):
A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''', lowerCamelCase__, lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return processors
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[Any] = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__, lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if hasattr(lowerCamelCase__, """set_processor""" ):
if not isinstance(lowerCamelCase__, lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''', lowerCamelCase__, lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.set_attn_processor(AttnProcessor() )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = True, ):
A : Optional[int] = hidden_states.shape[0]
A : Tuple = timestep
if not torch.is_tensor(lowerCamelCase__ ):
A : List[Any] = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
A : Optional[int] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A : Optional[Any] = timesteps * torch.ones(lowerCamelCase__, dtype=timesteps.dtype, device=timesteps.device )
A : str = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
A : Union[str, Any] = timesteps_projected.to(dtype=self.dtype )
A : Tuple = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
A : Optional[Any] = self.embedding_proj_norm(lowerCamelCase__ )
A : Any = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
A : List[str] = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
A : Tuple = self.proj_in(lowerCamelCase__ )
A : Dict = self.positional_embedding.to(hidden_states.dtype )
A : List[str] = []
A : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
A : Optional[Any] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
A : List[str] = hidden_states[:, None, :]
A : Dict = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
A : List[Any] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__, -1, -1 )
additional_embeds.append(lowerCamelCase__ )
A : Union[str, Any] = torch.cat(
lowerCamelCase__, dim=1, )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
A : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
A : Any = F.pad(
lowerCamelCase__, (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
), value=0.0, )
A : Union[str, Any] = hidden_states + positional_embeddings
if attention_mask is not None:
A : List[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
A : Tuple = F.pad(lowerCamelCase__, (0, self.additional_embeddings), value=0.0 )
A : List[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
A : Any = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
if self.norm_in is not None:
A : str = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
A : Optional[Any] = block(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : Optional[int] = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
A : Dict = hidden_states[:, -1]
else:
A : Optional[int] = hidden_states[:, additional_embeddings_len:]
A : Optional[Any] = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : str = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 116 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """simple docstring"""
    def __init__( self , initial_capacity: int = 6 ) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity: int ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data: Any ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    """simple docstring"""
    def __init__( self ) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
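# Worked example (added for illustration): capacity is fixed at construction time,
# and a node whose `data is None` marks a free slot.
if __name__ == "__main__":
    _q = CircularQueueLinkedList(initial_capacity=3)
    _q.enqueue("a")
    _q.enqueue("b")
    assert _q.first() == "a"
    assert _q.dequeue() == "a"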
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCamelCase__ : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCamelCase__ : Optional[Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCamelCase__ : List[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCamelCase__ : Optional[Any] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCamelCase__ : str = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCamelCase__ : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCamelCase__ : List[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 164 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""
    def __init__( self , list_of_points: list[tuple[float, float]] ) -> None:
        '''simple docstring'''
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t: float ) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t: float ) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size: float = 0.01 ) -> None:
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree ) , )
        plt.scatter(x , y , color="red" , label="Control Points" )
        plt.legend()
        plt.show()
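# Worked check (added for illustration): a degree-1 Bezier curve is the straight
# line between its two control points, so t = 0.5 yields the midpoint.
if __name__ == "__main__":
    assert BezierCurve([(1, 1), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.0)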
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 326 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : int = StableUnCLIPImgaImgPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__ : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a__ : int = frozenset([] )
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Tuple = 32
__UpperCamelCase :Optional[int] = embedder_hidden_size
# image encoding components
__UpperCamelCase :Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32)
torch.manual_seed(0)
__UpperCamelCase :Union[str, Any] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__lowercase , projection_dim=__lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
__UpperCamelCase :str = StableUnCLIPImageNormalizer(embedding_dim=__lowercase)
__UpperCamelCase :Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
torch.manual_seed(0)
__UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
torch.manual_seed(0)
__UpperCamelCase :Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
torch.manual_seed(0)
__UpperCamelCase :List[Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowercase , layers_per_block=1 , upcast_attention=__lowercase , use_linear_projection=__lowercase , )
torch.manual_seed(0)
__UpperCamelCase :Tuple = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
torch.manual_seed(0)
__UpperCamelCase :List[str] = AutoencoderKL()
__UpperCamelCase :Tuple = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def UpperCamelCase__ ( self , __lowercase , __lowercase=0 , __lowercase=True) -> str:
if str(__lowercase).startswith('''mps'''):
__UpperCamelCase :Union[str, Any] = torch.manual_seed(__lowercase)
else:
__UpperCamelCase :int = torch.Generator(device=__lowercase).manual_seed(__lowercase)
__UpperCamelCase :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase)).to(__lowercase)
if pil_image:
__UpperCamelCase :List[Any] = input_image * 0.5 + 0.5
__UpperCamelCase :Optional[Any] = input_image.clamp(0 , 1)
__UpperCamelCase :int = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
__UpperCamelCase :Optional[Any] = DiffusionPipeline.numpy_to_pil(__lowercase)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase :Tuple = self.get_dummy_components()
__UpperCamelCase :Any = StableUnCLIPImgaImgPipeline(**__lowercase)
__UpperCamelCase :Optional[Any] = sd_pipe.to(__lowercase)
sd_pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowercase)
inputs.update({'''image_embeds''': None})
__UpperCamelCase :Any = sd_pipe(**__lowercase).images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase :List[Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__lowercase)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase__ ( self) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__lowercase)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCamelCase :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
__UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :Dict = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
__UpperCamelCase :Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCamelCase :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
__UpperCamelCase :Any = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :Optional[int] = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
__UpperCamelCase :List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
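
# `assert_mean_pixel_difference` is imported from the diffusers test utilities. The
# helper below is only a minimal sketch of what such a check typically does (an
# assumption, not the library's exact implementation), for images on a 0-255 scale:
import numpy as np  # harmless if already imported above

def _assert_mean_pixel_difference_sketch(image, expected_image, max_avg_diff=10.0):
    # Compare the mean absolute per-pixel deviation against a loose threshold so the
    # integration test tolerates small numerical differences across hardware.
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < max_avg_diff, f"images deviate by {avg_diff:.2f} on average"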
| 43 | 0 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random channels-first uint8 arrays and converts them to PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
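
    # Keyword arguments passed to `from_pretrained` override the serialized values,
    # which is what the assertions above verify for both the tokenizer and the
    # image processor.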
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
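
# A minimal end-to-end usage sketch of the processor under test. The checkpoint name
# and prompt below are illustrative assumptions and not part of this test file.
def _blip2_processor_usage_sketch():
    from transformers import Blip2Processor

    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    image = Image.new("RGB", (224, 224))
    # One call tokenizes the prompt and preprocesses the image into model inputs.
    return processor(images=image, text="a photo of", return_tensors="pt")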
| 50 |
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
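
# `debug_launcher` spawns the given function in a small CPU process group, letting
# distributed-style accelerate tests run on machines without GPUs. A usage sketch
# (the test function name is an illustrative assumption):
#
#     def _my_distributed_check():
#         ...  # e.g. construct accelerate.Accelerator() and assert on its state
#
#     debug_launcher(_my_distributed_check, num_processes=2)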
| 50 | 1 |