"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # The position ids should be masked with the embedding object's padding
        # index, so the first available non-padding position is padding_idx + 1.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
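# Illustrative usage of the checkpoint exercised by the integration tests above.
# This is a sketch, not part of the original test file; it assumes network
# access to download the facebook/esm2_t6_8M_UR50D weights.
#
#     import torch
#     from transformers import EsmModel
#
#     model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     with torch.no_grad():
#         hidden_states = model(torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]))[0]
#     print(hidden_states.shape)  # (batch, seq_len, hidden_size); hidden_size is 320 for this checkpoint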
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
__SCREAMING_SNAKE_CASE = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__SCREAMING_SNAKE_CASE = 1
if upper_limit > 0:
__SCREAMING_SNAKE_CASE = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCAmelCase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
a__ : List[str] = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
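# Illustrative check of the recurrence above (the first six Catalan numbers):
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]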
"""simple docstring"""
def __lowercase ( snake_case_ : int ) ->bool:
'''simple docstring'''
__A : Union[str, Any] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __lowercase ( snake_case_ : int = 5000 ) ->int:
'''simple docstring'''
__A : str = [(i * (3 * i - 1)) // 2 for i in range(1 ,snake_case_ )]
for i, pentagonal_i in enumerate(snake_case_ ):
for j in range(snake_case_ ,len(snake_case_ ) ):
__A : int = pentagonal_nums[j]
__A : str = pentagonal_i + pentagonal_j
__A : int = pentagonal_j - pentagonal_i
if is_pentagonal(snake_case_ ) and is_pentagonal(snake_case_ ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
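# Quick sanity check of the pentagonality test: P(4) = 22 is pentagonal, 23 is not.
assert is_pentagonal(22) and not is_pentagonal(23)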
"""simple docstring"""
from math import factorial
def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->int:
'''simple docstring'''
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
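# Worked example: C(5, 2) = 5! / (2! * 3!) = 10.
assert combinations(5, 2) == 10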
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)


if __name__ == "__main__":
    main()
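# Illustrative invocation (a sketch; the TPU name, zone and command are
# placeholders, and --debug makes the tool print the generated gcloud command
# instead of running it):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug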
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__A : str = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__A,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
from math import sqrt


def is_prime(number):
    """
    input: positive integer 'number'
    returns true if 'number' is prime otherwise false.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    """
    input: positive integer 'n' > 2
    returns a list of all prime numbers from 2 up to n (sieve of Eratosthenes).
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n):
    """
    input: positive integer 'n' > 2
    returns a list of all prime numbers from 2 up to n, using is_prime.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number):
    """
    input: positive integer 'number'
    returns a list of the prime number factors of 'number'.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number):
    """
    input: positive integer 'number' >= 0
    returns the greatest prime number factor of 'number'.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    """
    input: positive integer 'number' >= 0
    returns the smallest prime number factor of 'number'.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number):
    """
    input: integer 'number'
    returns true if 'number' is even, otherwise false.
    """
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number):
    """
    input: integer 'number'
    returns true if 'number' is odd, otherwise false.
    """
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number):
    """
    Goldbach's assumption: input an even positive integer 'number' > 2
    returns a list of two prime numbers whose sum is equal to 'number'.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1, number2):
    """
    Greatest common divisor (Euclidean algorithm)
    input: two positive integers 'number1' and 'number2'
    returns the greatest common divisor of 'number1' and 'number2'.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1, number2):
    """
    Least common multiple (kgV)
    input: two positive integers 'number1' and 'number2'
    returns the least common multiple of 'number1' and 'number2'.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n):
    """
    Gets the n-th prime number.
    input: positive integer 'n' >= 0
    returns the n-th prime number, beginning at index 0 (get_prime(0) == 2).
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    """
    input: prime numbers 'p_number_1' and 'p_number_2' with p_number_1 < p_number_2
    returns a list of all prime numbers strictly between the two arguments.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """
    input: positive integer 'n' >= 1
    returns all divisors of n (inclusive 1 and 'n').
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    """
    input: positive integer 'number' > 1
    returns true if 'number' is a perfect number, otherwise false.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """
    input: two integers 'numerator' and 'denominator' with denominator != 0
    returns the fraction reduced to lowest terms as a tuple.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """
    input: positive integer 'n'
    returns the factorial of 'n' (n!).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    """
    input: positive integer 'n'
    returns the n-th fibonacci term, indexing by 0.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
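# A few illustrative checks of the helpers above; each value follows directly
# from the definitions:
assert is_prime(97) and not is_prime(1)
assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]
assert prime_factorization(60) == [2, 2, 3, 5]
assert goldbach(28) == [5, 23]
assert gcd(54, 24) == 6
assert kg_v(4, 6) == 12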
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack implementation."""

    def test_sorted(self):
        """kp.calc_profit takes (profit, weight, max_weight) and returns the best profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Raises ValueError for any negative max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Raises ValueError for any negative weight value."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Raises ValueError for any negative profit value."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Raises ValueError for a zero max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Raises ValueError when the profit and weight lists differ in length."""
        self.assertRaisesRegex(ValueError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
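# Illustrative invocation (a sketch; the script name and all paths are
# placeholders for wherever the checkpoint and config actually live):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5_pytorch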
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place with insertion sort, using binary search to find
    each element's insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion position of 'val' in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and place 'val'
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
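# Illustrative check:
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]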
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
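# Illustrative usage (a sketch; MraConfig lives inside the transformers
# package, so this assumes that import context):
#
#   config = MraConfig()              # all defaults shown above
#   config.hidden_size                # -> 768
#   config.approx_mode                # -> "full"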
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # keep widening the query until enough candidates are returned
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
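# Illustrative invocation (a sketch; the prompt and output directory are
# placeholders):
#
#   python retrieve.py --class_prompt "photo of a cat" \
#       --class_data_dir ./real_reg/cat --num_class_images 200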
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__UpperCamelCase : int = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__UpperCamelCase : Dict = F'''https://www.google.com/search?q={query}&num=100'''
__UpperCamelCase : Tuple = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
__UpperCamelCase : Tuple = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
__UpperCamelCase : Optional[Any] = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : int = 9
_a : List[str] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
_a : Optional[Any] = kruskal(UpperCamelCase__ , UpperCamelCase__ )
_a : List[str] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(UpperCamelCase__ ) == sorted(UpperCamelCase__ )
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
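

# Example usage (a sketch; the data directory is a placeholder and must contain a
# CoNLL-formatted `train.txt`):
#
#     task = NER()
#     examples = task.read_examples_from_file("path/to/data_dir", Split.train)
#     labels = task.get_labels(path=None)  # falls back to the default CoNLL-2003 label set
#     print(examples[0].words, examples[0].labels)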
| 294 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 107 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_hub: str, file_name: str):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")

    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
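
# Example invocation (a sketch; the script filename is an assumption and the output
# directory is a placeholder):
#
#     python convert_suno_to_hf.py text ./bark-text --is_small
#
# This downloads the small "text" checkpoint into the cache if needed, validates its
# outputs against the original suno implementation, and saves the converted model.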
| 107 | 1 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
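

# A minimal smoke test of the module above (illustrative sizes; the cutoffs split a
# 1000-token vocabulary into a head cluster and two tail clusters; labels are drawn
# from the shortlist so every cluster bucket has multiple members):
if __name__ == "__main__":
    adaptive_softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
    hidden = torch.randn(2, 7, 64)  # (batch, sequence, d_proj)
    labels = torch.randint(0, 100, (2, 7))
    neg_log_likelihood = adaptive_softmax(hidden, labels)  # per-token NLL, flattened
    print(neg_log_likelihood.shape)  # torch.Size([12]): the shift drops one position per sequence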
| 35 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 35 | 1 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over odd numbers; returns all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    """Returns the prime below `ceiling` that is the sum of the longest run of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
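    # Sanity check from the problem statement: below 100, the longest run of
    # consecutive primes that sums to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41.
    assert solution(100) == 41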
| 370 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Returns the last `n` digits of the prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
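    # `pow(2, 7830457, modulus)` uses modular exponentiation, so only the last `n`
    # digits are ever materialized; e.g. pow(2, 10, 100) == 24 without ever
    # computing 2**10 in full.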
| 227 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
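

# Example usage (a sketch; the dataclass fields and CLI values are illustrative):
#
#     @dataclasses.dataclass
#     class TrainingArgs:
#         learning_rate: float = 3e-4
#         fp16: bool = False
#
#     parser = HfArgumentParser(TrainingArgs)
#     (training_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3", "--fp16"])
#     assert training_args.learning_rate == 1e-3 and training_args.fp16 is True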
| 95 |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Converts a 3d point to a 2d drawable point via perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotates a point around a given axis by a given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Degree-to-radian conversion as in the original module; note the unconventional
    # 450 divisor (a plain conversion would be `angle * math.pi / 180`).
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
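
# Composing the two helpers renders a rotated 3D point in 2D, e.g. spin a point about
# the z-axis and then project it (illustrative values, shown here as a sketch):
#
#     x, y, z = rotate(1.0, 2.0, 3.0, "z", 90.0)
#     print(convert_to_2d(x, y, z, scale=10.0, distance=10.0))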
| 124 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
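

# Example: modules can call this at import time to enforce a pinned dependency,
# e.g. `dep_version_check("tqdm")` raises if the installed tqdm falls outside the
# range declared in `deps` (assuming "tqdm" is listed there).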
| 295 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
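

# A concrete test case would mix this class in and pin down the block under test,
# roughly as below (a sketch; `UpBlock2D` and the expected slice are illustrative
# placeholders, not values taken from this file):
#
#     class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#         block_class = UpBlock2D  # e.g. from diffusers.models.unet_2d_blocks
#         block_type = "up"
#
#         def test_output(self):
#             expected_slice = [0.1, ...]  # reference values recorded for this block
#             super().test_output(expected_slice)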
| 295 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
for attribute in key.split('''.''' ):
__lowercase : Optional[int] = getattr(_snake_case , _snake_case )
if weight_type is not None:
__lowercase : Dict = getattr(_snake_case , _snake_case ).shape
else:
__lowercase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
__lowercase : int = value
elif weight_type == "weight_g":
__lowercase : Optional[Any] = value
elif weight_type == "weight_v":
__lowercase : str = value
elif weight_type == "bias":
__lowercase : Union[str, Any] = value
else:
__lowercase : List[str] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
__lowercase : List[Any] = []
__lowercase : Any = fairseq_model.state_dict()
__lowercase : Optional[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowercase : Any = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
__lowercase : Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
__lowercase : Any = True
if "*" in mapped_key:
__lowercase : Union[str, Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__lowercase : Any = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__lowercase : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__lowercase : Optional[int] = '''weight_v'''
elif "weight" in name:
__lowercase : Tuple = '''weight'''
elif "bias" in name:
__lowercase : Union[str, Any] = '''bias'''
else:
__lowercase : List[Any] = None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
__lowercase : str = full_name.split('''conv_layers.''' )[-1]
__lowercase : List[Any] = name.split('''.''' )
__lowercase : Tuple = int(items[0] )
__lowercase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__lowercase : str = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__lowercase : Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__lowercase : Optional[int] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__lowercase : str = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
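# Illustrative invocation (hypothetical local paths; the script name is whatever
# this file is saved as -- adjust both to your setup):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned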
| 156 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """Two-color the graph via DFS; it is bipartite iff no edge joins same-colored vertices."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
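# Quick check (illustrative): a triangle contains an odd cycle, so it cannot be
# two-colored and the function should return False.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False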
| 218 | 0 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 360 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 238 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'Training/evaluation parameters {training_args}')
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Set seed before initializing model.
set_seed(training_args.seed)
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main() | 232 |
class Graph:
    def __init__(self) -> None:
        self.vertex: dict[int, list[int]] = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 232 | 1 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    A weighted sum of a set of control points; generates a Bezier curve from the
    given control points.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """The Bernstein basis determines the weight of each control point at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Produce the (x, y) value of the Bezier curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the Bezier curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
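    # Quick check (illustrative, no plotting): for the degree-1 curve through
    # (1, 2) and (3, 5), the point at t = 0.5 is the midpoint of the control points.
    print(BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5))  # (2.0, 3.5)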
| 227 |
'''simple docstring'''
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
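# Minimal usage sketch (illustrative grid): diagonal neighbours count as
# connected here, so these three 1s form a single island.
if __name__ == "__main__":
    print(Matrix(3, 3, [[1, 0, 0], [0, 1, 0], [0, 0, 1]]).count_islands())  # 1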
| 227 | 1 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process using shortest remaining time first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turn around time of each process: burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting time and average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 40 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 54 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 356 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 250 | 0 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start:end + 1] in O(1) time."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)

        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
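    # Quick usage sketch: prefix sums answer range-sum queries in O(1) after
    # an O(n) setup pass.
    ps = PrefixSum([1, 2, 3, 4])
    print(ps.get_sum(1, 3))  # 2 + 3 + 4 = 9
    print(ps.contains_sum(7))  # True: the subarray [3, 4]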
| 266 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Creates the cache directory for modules with an init, and adds it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Creates a dynamic module in the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Get the list of modules that are relatively imported in a module file."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
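# Illustrative behaviour (hypothetical file contents): for a module containing
# the lines "import .utils" and "from .pipeline_foo import FooPipeline", this
# returns ["utils", "pipeline_foo"] (deduplicated, order not guaranteed).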
def get_relative_import_files(module_file):
    """Get the list of all files that are needed for a given module, recursing through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check if the current Python environment contains all the libraries that are imported in a file."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import a module from the cache directory for modules and extract a class from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. There has to be exactly one such class
    defined outside of the `diffusers` package itself.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download a module file from a local folder, the Hub, or the GitHub community pipelines, and cache it."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Extract a class from a module file, present in the local folder or repository of a model."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 266 | 1 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """The standard logistic function: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
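    # Quick check (illustrative): SiLU is x * sigmoid(x), so it is exactly 0 at
    # x = 0 and approaches the identity for large positive inputs.
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))  # approx. [-0.2689  0.  0.7311]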
| 61 |
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 61 | 1 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 328 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers that cannot be written as prime + 2 * square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 328 | 1 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's step function output."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """The inverted scheduler of DDIM: steps from clean samples toward noise."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
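# Minimal usage sketch (illustrative shapes; random tensors stand in for a real
# UNet prediction of the noise):
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    noise_pred = torch.randn(1, 3, 8, 8)
    out = scheduler.step(noise_pred, scheduler.timesteps[0], sample)
    print(out.prev_sample.shape)  # torch.Size([1, 3, 8, 8])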
| 350 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves required so that every node of the binary
    tree holds exactly one coin (a move transfers one coin along one edge).
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
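
# Hedged, self-contained sketch of the same moves/excess recursion on a tiny
# tree (root holds 3 coins, two empty leaves -> 2 moves). `Node` here is a
# stand-in illustration, not the dataclass used above.
def _demo_distribute_coins() -> int:
    class Node:
        def __init__(self, coins, left=None, right=None):
            self.coins, self.left, self.right = coins, left, right

    moves = 0

    def excess(node) -> int:
        # a node's excess = coins in its subtree minus nodes in its subtree;
        # every unit of |excess| crossing an edge costs one move
        nonlocal moves
        if node is None:
            return 0
        left, right = excess(node.left), excess(node.right)
        moves += abs(left) + abs(right)
        return node.coins + left + right - 1

    excess(Node(3, Node(0), Node(0)))
    return moves  # -> 2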
| 326 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_UpperCamelCase : List[str] = 8
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str=BITS ):
'''simple docstring'''
lowercase__ : List[Any] = x.device
lowercase__ : List[str] = (x * 255).int().clamp(0 , 255 )
lowercase__ : Union[str, Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase )
lowercase__ : Dict = rearrange(_lowerCAmelCase , 'd -> d 1 1' )
lowercase__ : List[Any] = rearrange(_lowerCAmelCase , 'b c h w -> b c 1 h w' )
lowercase__ : Any = ((x & mask) != 0).float()
lowercase__ : Optional[int] = rearrange(_lowerCAmelCase , 'b c d h w -> b (c d) h w' )
lowercase__ : int = bits * 2 - 1
return bits
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]=BITS ):
'''simple docstring'''
lowercase__ : Dict = x.device
lowercase__ : str = (x > 0).int()
lowercase__ : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase , dtype=torch.intaa )
lowercase__ : Union[str, Any] = rearrange(_lowerCAmelCase , 'd -> d 1 1' )
lowercase__ : Tuple = rearrange(_lowerCAmelCase , 'b (c d) h w -> b c d h w' , d=8 )
lowercase__ : Union[str, Any] = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
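
# Hedged standalone sketch of the 8-bit round trip the two helpers above
# implement: quantize to integers, expand each channel into 8 bit planes,
# then recombine the planes and rescale.
def _demo_bits_roundtrip():
    x = torch.rand(1, 3, 4, 4)  # values in [0, 1]
    ints = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(7, -1, -1)  # [128, 64, ..., 1]
    bits = ((ints.unsqueeze(2) & mask.view(1, 1, 8, 1, 1)) != 0).float()
    # the pipeline additionally rescales the planes to {-1, 1} via bits * 2 - 1
    back = (bits * mask.view(1, 1, 8, 1, 1)).sum(2) / 255.0
    assert torch.allclose(back, ints.float() / 255.0)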
def a_ ( self : List[Any] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail for a full understanding
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
lowercase__ : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowercase__ : Optional[Any] = self.alphas_cumprod[timestep]
lowercase__ : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowercase__ : Optional[int] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowercase__ : str = self.bit_scale
if self.config.clip_sample:
lowercase__ : Tuple = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
lowercase__ : int = self._get_variance(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[str] = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowercase__ : str = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Tuple = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowercase__ : Optional[Any] = model_output.device if torch.is_tensor(_lowerCAmelCase ) else 'cpu'
lowercase__ : Tuple = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
lowercase__ : Tuple = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) ** 0.5 * eta * noise
lowercase__ : int = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
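
# Hedged numeric instance of the sigma_t formula from step 5 above, with
# made-up cumulative alphas (eta = 1.0 recovers DDPM-like stochasticity):
def _demo_ddim_sigma(eta: float = 1.0) -> float:
    alpha_prod_t, alpha_prod_t_prev = 0.8, 0.9
    variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
    return eta * variance**0.5  # ~0.236 for these values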
def a_ ( self : List[str] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int="epsilon" , _lowerCAmelCase : int=None , _lowerCAmelCase : bool = True , ):
'''simple docstring'''
lowercase__ : Dict = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : List[Any] = torch.split(_lowerCAmelCase , sample.shape[1] , dim=1 )
else:
lowercase__ : Tuple = None
# 1. compute alphas, betas
lowercase__ : int = self.alphas_cumprod[t]
lowercase__ : Union[str, Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowercase__ : Optional[int] = 1 - alpha_prod_t
lowercase__ : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowercase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
lowercase__ : Optional[int] = self.bit_scale
if self.config.clip_sample:
lowercase__ : Any = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowercase__ : Tuple = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowercase__ : Optional[int] = 0
if t > 0:
lowercase__ : Dict = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_lowerCAmelCase ).to(model_output.device )
lowercase__ : List[str] = (self._get_variance(_lowerCAmelCase , predicted_variance=_lowerCAmelCase ) ** 0.5) * noise
lowercase__ : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a = 1.0 , ) -> str:
super().__init__()
lowercase__ : Any = bit_scale
lowercase__ : str = (
ddim_bit_scheduler_step if isinstance(a , a ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self , a = 2_5_6 , a = 2_5_6 , a = 5_0 , a = None , a = 1 , a = "pil" , a = True , **a , ) -> Union[Tuple, ImagePipelineOutput]:
lowercase__ : Optional[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=a , )
lowercase__ : Optional[int] = decimal_to_bits(a ) * self.bit_scale
lowercase__ : Dict = latents.to(self.device )
self.scheduler.set_timesteps(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowercase__ : str = self.unet(a , a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Tuple = self.scheduler.step(a , a , a ).prev_sample
lowercase__ : List[str] = bits_to_decimal(a )
if output_type == "pil":
lowercase__ : Optional[int] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
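
# Hedged usage sketch; `BitDiffusionPipeline` is an illustrative name for the
# pipeline class defined above, not a guaranteed import:
#   pipe = BitDiffusionPipeline(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]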
| 77 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : List[str]
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : Optional[List] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None
lowercase__ : Dict = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = set(self.languages )
if self.languages and set(_snake_case ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase__ : str = []
for lang, text in translation_dict.items():
if isinstance(_snake_case ,_snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
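
# Hedged standalone sketch of the flattening performed above: a dict with a
# multi-valued language is split into parallel, language-sorted lists.
def _demo_flatten_translations():
    translation_dict = {"fr": ["le chat", "la chatte"], "en": "the cat"}
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    # -> ("en", "fr", "fr") and ("the cat", "la chatte", "le chat")
    return {"language": list(languages), "translation": list(translations)}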
| 16 | 0 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__A : List[str] = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def UpperCamelCase_ ( A__ : Any ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Any ):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase_ : Any = False
elif args.student_type == "gpt2":
lowerCAmelCase_ : List[Any] = False
def UpperCamelCase_ ( A__ : Any , A__ : Dict ):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase_ : Optional[int] = False
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=A__ , required=A__ , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=A__ , required=A__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=A__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=A__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=A__ , required=A__ , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=A__ , type=A__ , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=A__ , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=A__ , required=A__ , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=A__ , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=A__ , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=A__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=A__ , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=A__ , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=A__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=A__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=A__ , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=A__ , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=A__ , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=A__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=A__ , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=A__ , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=A__ , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=A__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=A__ , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=A__ , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=A__ , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=A__ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=A__ , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=A__ , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=A__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=A__ , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=A__ , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=A__ , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=A__ , default=5_00 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=A__ , default=40_00 , help="""Checkpoint interval.""" )
lowerCAmelCase_ : List[str] = parser.parse_args()
sanity_checks(A__ )
# ARGS #
init_gpu_params(A__ )
set_seed(A__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    """ it. Use `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(A__ ) , A__ , indent=4 )
git_log(args.dump_path )
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Tuple = MODEL_CLASSES[args.student_type]
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Dict = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCAmelCase_ : Union[str, Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCAmelCase_ : str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCAmelCase_ : Optional[Any] = tokenizer.all_special_tokens.index(A__ )
lowerCAmelCase_ : List[Any] = tokenizer.all_special_ids[idx]
logger.info(f'Special tokens {special_tok_ids}' )
lowerCAmelCase_ : Optional[Any] = special_tok_ids
lowerCAmelCase_ : Union[str, Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
with open(args.data_file , """rb""" ) as fp:
lowerCAmelCase_ : Union[str, Any] = pickle.load(A__ )
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , """rb""" ) as fp:
lowerCAmelCase_ : Union[str, Any] = pickle.load(A__ )
lowerCAmelCase_ : Any = np.maximum(A__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCAmelCase_ : Union[str, Any] = 0.0 # do not predict special tokens
lowerCAmelCase_ : str = torch.from_numpy(A__ )
else:
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : Union[str, Any] = LmSeqsDataset(params=A__ , data=A__ )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
lowerCAmelCase_ : str = student_config_class.from_pretrained(args.student_config )
lowerCAmelCase_ : Dict = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
lowerCAmelCase_ : List[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=A__ )
else:
lowerCAmelCase_ : Dict = student_model_class(A__ )
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info("""Student loaded.""" )
# TEACHER #
lowerCAmelCase_ : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A__ )
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A__ , A__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A__ , A__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCAmelCase_ : Optional[int] = Distiller(
params=A__ , dataset=A__ , token_probs=A__ , student=A__ , teacher=A__ )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
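
# Hedged example invocation (script name, paths and hyper-parameters are
# illustrative only, chosen to satisfy the sanity checks above):
#   python train.py --student_type distilbert --student_config distilbert.json \
#     --teacher_type bert --teacher_name bert-base-uncased --mlm \
#     --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --token_counts counts.pkl \
#     --data_file data.pkl --dump_path serialization_dir/my_run --force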
| 89 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[str] = logging.get_logger(__name__)
def UpperCamelCase_ ( A__ : List[str] ):
'''simple docstring'''
print("""Loading config file...""" )
def flatten_yaml_as_dict(A__ : int , A__ : str="" , A__ : Dict="." ):
lowerCAmelCase_ : int = []
for k, v in d.items():
lowerCAmelCase_ : Any = parent_key + sep + k if parent_key else k
if isinstance(A__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(A__ , A__ , sep=A__ ).items() )
else:
items.append((new_key, v) )
return dict(A__ )
lowerCAmelCase_ : Optional[int] = argparse.Namespace()
with open(A__ , """r""" ) as yaml_file:
try:
lowerCAmelCase_ : Any = yaml.load(A__ , Loader=yaml.FullLoader )
lowerCAmelCase_ : Any = flatten_yaml_as_dict(A__ )
for k, v in flat_cfg.items():
setattr(A__ , A__ , A__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(A__ , str(A__ ) ) )
return config
def UpperCamelCase_ ( A__ : Optional[int] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = MobileViTVaConfig()
lowerCAmelCase_ : Any = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
lowerCAmelCase_ : Tuple = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
lowerCAmelCase_ : List[str] = 3_84
else:
lowerCAmelCase_ : Optional[int] = 2_56
lowerCAmelCase_ : int = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
lowerCAmelCase_ : int = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
lowerCAmelCase_ : int = 3_84
else:
lowerCAmelCase_ : int = 2_56
lowerCAmelCase_ : Optional[Any] = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
lowerCAmelCase_ : Dict = 1_51
lowerCAmelCase_ : Any = 5_12
lowerCAmelCase_ : int = """ade20k-id2label.json"""
lowerCAmelCase_ : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
lowerCAmelCase_ : Any = 21
lowerCAmelCase_ : List[str] = 5_12
lowerCAmelCase_ : Union[str, Any] = """pascal-voc-id2label.json"""
lowerCAmelCase_ : int = True
# orig_config
lowerCAmelCase_ : Optional[int] = load_orig_config_file(A__ )
assert getattr(A__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
lowerCAmelCase_ : Union[str, Any] = getattr(A__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(A__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCAmelCase_ : List[Any] = getattr(A__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCAmelCase_ : List[str] = getattr(A__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
lowerCAmelCase_ : Optional[int] = getattr(A__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
lowerCAmelCase_ : Optional[Any] = getattr(A__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
lowerCAmelCase_ : Optional[int] = getattr(A__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
lowerCAmelCase_ : Any = """huggingface/label-files"""
lowerCAmelCase_ : Any = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : Any = {int(A__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = idalabel
lowerCAmelCase_ : Tuple = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase_ ( A__ : int , A__ : Dict , A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = dct.pop(A__ )
lowerCAmelCase_ : Optional[int] = val
def UpperCamelCase_ ( A__ : int , A__ : int=False ):
'''simple docstring'''
if base_model:
lowerCAmelCase_ : List[Any] = """"""
else:
lowerCAmelCase_ : Any = """mobilevitv2."""
lowerCAmelCase_ : int = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCAmelCase_ : List[Any] = k[8:]
else:
lowerCAmelCase_ : Optional[Any] = k
if ".block." in k:
lowerCAmelCase_ : Dict = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
lowerCAmelCase_ : Any = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
lowerCAmelCase_ : List[Any] = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
lowerCAmelCase_ : Any = k_new.replace("""conv_1.""" , f'{model_prefix}conv_stem.' )
for i in [1, 2]:
if f'layer_{i}.' in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace(f'layer_{i}.' , f'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
lowerCAmelCase_ : Optional[int] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
lowerCAmelCase_ : List[str] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if f'layer_{i}.0.' in k:
lowerCAmelCase_ : Optional[int] = k_new.replace(f'layer_{i}.0.' , f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if f'layer_{i}.1.local_rep.0.' in k:
lowerCAmelCase_ : str = k_new.replace(f'layer_{i}.1.local_rep.0.' , f'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if f'layer_{i}.1.local_rep.1.' in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace(f'layer_{i}.1.local_rep.1.' , f'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
lowerCAmelCase_ : Optional[Any] = [0, 1]
elif i == 4:
lowerCAmelCase_ : Any = [0, 1, 2, 3]
elif i == 5:
lowerCAmelCase_ : Any = [0, 1, 2]
for j in j_in:
if f'layer_{i}.1.global_rep.{j}.' in k:
lowerCAmelCase_ : Optional[int] = k_new.replace(
f'layer_{i}.1.global_rep.{j}.' , f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if f'layer_{i}.1.global_rep.{j+1}.' in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace(
f'layer_{i}.1.global_rep.{j+1}.' , f'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if f'layer_{i}.1.conv_proj.' in k:
lowerCAmelCase_ : str = k_new.replace(f'layer_{i}.1.conv_proj.' , f'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
lowerCAmelCase_ : Dict = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
lowerCAmelCase_ : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
lowerCAmelCase_ : Any = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
lowerCAmelCase_ : int = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
lowerCAmelCase_ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
lowerCAmelCase_ : Optional[int] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
lowerCAmelCase_ : Dict = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
lowerCAmelCase_ : List[Any] = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCamelCase_ ( A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(A__ )
for k in keys_to_ignore:
state_dict.pop(A__ , A__ )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCAmelCase_ : Optional[Any] = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( A__ : Dict , A__ : Union[str, Any] , A__ : str , A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : int = get_mobilevitva_config(A__ , A__ )
# load original state_dict
lowerCAmelCase_ : int = torch.load(A__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
lowerCAmelCase_ : Union[str, Any] = MobileViTVaForSemanticSegmentation(A__ ).eval()
lowerCAmelCase_ : Union[str, Any] = False
else:
lowerCAmelCase_ : List[Any] = MobileViTVaForImageClassification(A__ ).eval()
lowerCAmelCase_ : Optional[int] = False
    # remove and rename some keys of the loaded original model
lowerCAmelCase_ : Tuple = checkpoint
remove_unused_keys(A__ )
lowerCAmelCase_ : List[Any] = create_rename_keys(A__ , base_model=A__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(A__ , A__ , A__ )
# load modified state_dict
model.load_state_dict(A__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCAmelCase_ : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCAmelCase_ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : int = model(**A__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
lowerCAmelCase_ : Optional[Any] = outputs.logits
lowerCAmelCase_ : Tuple = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCAmelCase_ : int = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
assert torch.allclose(logits[0, :3] , A__ , atol=1E-4 )
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__A : Union[str, Any] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
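
# Hedged example invocation (file names are illustrative only):
#   python convert_mobilevitv2.py --task imagenet1k_256 \
#     --orig_checkpoint_path mobilevitv2-1.0.pt \
#     --orig_config_path mobilevitv2-1.0.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0-hf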
| 89 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class SCREAMING_SNAKE_CASE_ :
__lowerCAmelCase = PegasusConfig
__lowerCAmelCase = {}
__lowerCAmelCase = """gelu"""
def __init__( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : str=13 , lowerCamelCase_ : Union[str, Any]=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=99 , lowerCamelCase_ : int=32 , lowerCamelCase_ : int=2 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Dict=37 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : List[Any]=40 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : str=1 , lowerCamelCase_ : Any=0 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase = prepare_pegasus_inputs_dict(A_ , A_ , A_ )
return config, inputs_dict
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = TFPegasusModel(config=A_ ).get_decoder()
UpperCamelCase = inputs_dict['''input_ids''']
UpperCamelCase = input_ids[:1, :]
UpperCamelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCamelCase = inputs_dict['''head_mask''']
UpperCamelCase = 1
# first forward pass
UpperCamelCase = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ )
UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase = model(A_ , attention_mask=A_ )[0]
UpperCamelCase = model(A_ , attention_mask=A_ , past_key_values=A_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A_ , A_ , rtol=1E-3 )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
UpperCamelCase = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
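
# Worked instance of the default mask construction above (hedged): with
# pad_token_id = 0, input_ids [[5, 6, 0]] yields attention_mask [[1, 1, 0]],
# and the decoder mask always keeps position 0 via the prepended ones.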
@require_tf
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__lowerCAmelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__lowerCAmelCase = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = TFPegasusModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
__lowerCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__lowerCAmelCase = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__lowerCAmelCase = """google/pegasus-xsum"""
@cached_property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase_ ( self : Dict , **lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = self.translate_src_text(**A_ )
assert self.expected_text == generated_words
def lowerCamelCase_ ( self : Tuple , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.tokenizer(self.src_text , **A_ , padding=A_ , return_tensors="""tf""" )
UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A_ , )
UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A_ )
return generated_words
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 343 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = None
class __lowercase (_UpperCAmelCase , _UpperCAmelCase ):
_UpperCamelCase = 2
@register_to_config
def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.007 , A_ = 80 , A_ = 0.05 , A_ = 50 , ) ->int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = sigma_max
# setable values
__lowerCAmelCase : int = None
__lowerCAmelCase : np.IntTensor = None
__lowerCAmelCase : torch.FloatTensor = None # sigma(t_i)
def UpperCamelCase__ ( self , A_ , A_ = None ) ->torch.FloatTensor:
'''simple docstring'''
return sample
def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : str = num_inference_steps
__lowerCAmelCase : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy()
__lowerCAmelCase : Optional[Any] = torch.from_numpy(A_ ).to(A_ )
__lowerCAmelCase : Tuple = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__lowerCAmelCase : Optional[int] = torch.tensor(A_ , dtype=torch.floataa , device=A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ = None ) ->Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
__lowerCAmelCase : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
__lowerCAmelCase : List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
__lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device )
__lowerCAmelCase : str = sigma + gamma * sigma
__lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output
__lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
__lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
__lowerCAmelCase : str = sample_prev + sigma_prev * model_output
__lowerCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
__lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
raise NotImplementedError()
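
# Hedged standalone sketch of the stochastic "churn" computed above: inflate
# sigma to sigma_hat and add matched noise so the sample sits exactly on the
# sigma_hat noise level (values here are illustrative).
def _demo_karras_churn():
    sigma, gamma, s_noise = 10.0, 0.05, 1.007
    sample = torch.tensor([3.0])
    eps = s_noise * randn_tensor(sample.shape)
    sigma_hat = sigma + gamma * sigma
    sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps
    return sample_hat, sigma_hat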
| 275 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = ["input_values", "padding_mask"]
def __init__( self : Optional[Any] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 2_40_00 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : float = None , __lowerCamelCase : float = None , **__lowerCamelCase : Any , ) -> Optional[int]:
super().__init__(feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase )
A : Dict = chunk_length_s
A : Tuple = overlap
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
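    # Worked instance of the two properties above (hedged; values illustrative):
    # with sampling_rate=24000, chunk_length_s=1.0 and overlap=0.01,
    # chunk_length = int(1.0 * 24000) = 24000 and
    # chunk_stride = max(1, int(0.99 * 24000)) = 23760.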
def __call__( self : str , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : Optional[Union[bool, str, PaddingStrategy]] = None , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[int] = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
A : Optional[Any] = True
A : Union[str, Any] = bool(
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
A : Dict = [np.asarray(__lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
A : int = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
A : Dict = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
A : int = [np.asarray(__lowerCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__lowerCamelCase ):
if example.ndim > 2:
raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" )
A : Union[str, Any] = None
A : Tuple = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
A : Union[str, Any] = min(array.shape[0] for array in raw_audio )
A : List[Any] = int(np.floor(max_length / self.chunk_stride ) )
A : str = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
A : Optional[Any] = max(array.shape[0] for array in raw_audio )
A : Union[str, Any] = int(np.ceil(max_length / self.chunk_stride ) )
A : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
A : Any = "max_length"
else:
A : int = input_values
# normal padding on batch
if padded_inputs is None:
A : List[Any] = self.pad(
__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , padding=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
if padding:
A : Tuple = padded_inputs.pop("attention_mask" )
A : Any = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
A : Union[str, Any] = example[..., None]
input_values.append(example.T )
A : Union[str, Any] = input_values
if return_tensors is not None:
A : List[Any] = padded_inputs.convert_to_tensors(__lowerCamelCase )
        return padded_inputs
| 256 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def UpperCAmelCase ( _lowerCamelCase ):
A : Any = []
for line in lines:
A : List[str] = re.sub(R"#.*" , "" , _lowerCamelCase ) # remove comments
if line:
filtered_lines.append(_lowerCamelCase )
A : str = "\n".join(_lowerCamelCase )
# Make a hash from all this code
A : Any = full_str.encode("utf-8" )
return shaaaa(_lowerCamelCase ).hexdigest()
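
# Hedged worked instance of the helper above:
#   _hash_python_lines(["import csv  # reader", "", "x = 1"])
# drops the comment and the empty line, hashes "import csv  \nx = 1", and
# returns a stable hex digest used as the module cache key below.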
# get importable module names and hash for caching
__SCREAMING_SNAKE_CASE = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__SCREAMING_SNAKE_CASE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__SCREAMING_SNAKE_CASE = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
__SCREAMING_SNAKE_CASE = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""") | 256 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "vit_mae"
def __init__( self : Dict , A : List[str]=7_68 , A : Any=12 , A : Union[str, Any]=12 , A : Tuple=30_72 , A : Any="gelu" , A : Tuple=0.0 , A : List[str]=0.0 , A : Tuple=0.02 , A : Tuple=1e-12 , A : int=2_24 , A : Dict=16 , A : int=3 , A : Tuple=True , A : Tuple=16 , A : Optional[Any]=5_12 , A : Union[str, Any]=8 , A : List[Any]=20_48 , A : Dict=0.75 , A : Any=False , **A : Optional[int] , ) -> Union[str, Any]:
super().__init__(**A )
lowercase_ : List[Any] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : int = initializer_range
lowercase_ : Dict = layer_norm_eps
lowercase_ : Optional[Any] = image_size
lowercase_ : str = patch_size
lowercase_ : Dict = num_channels
lowercase_ : Any = qkv_bias
lowercase_ : Union[str, Any] = decoder_num_attention_heads
lowercase_ : Optional[Any] = decoder_hidden_size
lowercase_ : List[str] = decoder_num_hidden_layers
lowercase_ : List[Any] = decoder_intermediate_size
lowercase_ : Optional[Any] = mask_ratio
lowercase_ : Optional[Any] = norm_pix_loss
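
# Hedged worked instance of the masking defaults above: with image_size=224 and
# patch_size=16 the encoder sees (224 // 16) ** 2 = 196 patches; mask_ratio=0.75
# hides int(196 * 0.75) = 147 of them, leaving 49 visible to the encoder.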
| 33 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 33 | 1 |
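# A minimal usage sketch for the config above (override values are illustrative
# assumptions, not from the original file): constructor arguments are stored as
# attributes of the same name.
#
#   config = ViTMAEConfig(mask_ratio=0.6, patch_size=14)
#   config.model_type  # "vit_mae"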
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 371 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a_ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def __lowercase ( snake_case_ : str ) ->dict[str, int]:
'''simple docstring'''
__A : Any = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __lowercase ( snake_case_ : tuple ) ->str:
'''simple docstring'''
return x[0]
def __lowercase ( snake_case_ : str ) ->str:
'''simple docstring'''
__A : Union[str, Any] = get_letter_count(snake_case_ )
__A : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(snake_case_ )
__A : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find ,reverse=snake_case_ )
__A : Optional[int] = ''''''.join(freq_to_letter[freq] )
__A : str = list(freq_to_letter_str.items() )
freq_pairs.sort(key=snake_case_ ,reverse=snake_case_ )
__A : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(snake_case_ )
def __lowercase ( snake_case_ : str ) ->int:
'''simple docstring'''
__A : Any = get_frequency_order(snake_case_ )
__A : str = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291 | 0 |
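# A minimal usage sketch (the sample sentence is illustrative, not from the original):
# scores range from 0 to 12; English-like text tends to score high because its most
# and least frequent letters overlap with ETAOIN.
#
#   english_freq_match_score("The quick brown fox jumps over the lazy dog")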
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in docs/source/_static/js/custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
    args = parser.parse_args()
update_custom_js(args.version)
| 340 |
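# A hedged sketch of the custom.js region the script above edits (contents are assumed
# for illustration; version numbers are placeholders):
#
#   const stableVersion = "v4.26.0"
#   const versionMapping = {
#       "": "v4.26.0 (stable)",
#       "v4.25.1": "v4.25.1",
#   }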
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 340 | 1 |
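# A hedged CLI sketch via `fire` (tokenizer name and data path are placeholders):
#
#   python save_len_file.py t5-small /path/to/data --consider_target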
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv matrix/vector into separate query, key and value entries
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 368 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to ``num`` using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 148 | 0 |
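# A minimal usage sketch (output is easy to verify by hand):
#
#   prime_sieve(30)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]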
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, meaning the vertex has not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 69 |
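# A minimal usage sketch (adjacency matrix is illustrative):
#
#   graph = [
#       [0, 1, 0, 1, 0],
#       [1, 0, 1, 1, 1],
#       [0, 1, 0, 0, 1],
#       [1, 1, 0, 0, 1],
#       [0, 1, 1, 1, 0],
#   ]
#   hamilton_cycle(graph)  # [0, 1, 2, 4, 3, 0]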
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
| 165 | 0 |
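# Hedged note: this is the classic block-placement DP (cf. Project Euler #114, where
# blocks have minimum length 3 and must be separated by at least one unit); the
# problem statement gives solution(7) == 17 as a worked example.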
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 210 |
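# Hedged note on the lazy-import pattern above: at import time only the name table is
# built; `_LazyModule` performs the real submodule import on first attribute access,
# e.g. (import path is an assumption for illustration):
#
#   from transformers.models.efficientformer import EfficientFormerConfig  # resolved lazily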
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors (CLIP-style q/k/v projections)
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 210 | 1 |
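# A hedged CLI sketch for the converter above (script and checkpoint filenames are
# placeholders):
#
#   python convert_groupvit.py --checkpoint_path ./groupvit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-hf --model_name groupvit-gcc-yfcc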
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 44 |
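# Hedged note on a design choice above: both converters remap keys by zipping the two
# state dicts in iteration order, which silently assumes identical parameter ordering.
# A safer sketch (hypothetical names) would verify shapes before remapping:
#
#   for k, v in mapping.items():
#       assert source_state[k].shape == target_state[v].shape, (k, v)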
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 247 | 0 |
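# A minimal usage sketch (paths and model names are illustrative):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   dataset = SquadDataset(args, tokenizer, mode="train")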
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, src, dest):
    val = dct.pop(src)
    dct[dest] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our DETA structure.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 359 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , do_convert_rgb = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        return encoded_outputs
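# Hedged usage sketch for the processor above (the class keeps its obfuscated name
# `__snake_case` in this dump; upstream it is a BLIP-style image processor):
#   from PIL import Image
#   processor = __snake_case()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   print(batch["pixel_values"].shape)  # expected (1, 3, 384, 384) with the defaults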
| 78 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>
Assistant: """
DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def _snake_case( prompt_or_repo_id , agent_name , mode="run" ) -> str:
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("""\\s""" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
    with open(prompt_file , """r""" , encoding="""utf-8""" ) as f:
        return f.read()
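# Hedged usage sketch (requires network access; `_snake_case` is the downloader
# defined above — upstream it is called `download_prompt`):
#   run_template = _snake_case(None, agent_name="my-agent", mode="run")
#   print(run_template[:80])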
| 20 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer( self ,**kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint ,**kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
    def test_save_load_pretrained_additional_features( self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
        processor.save_pretrained(
            self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string ,voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname ,"""file.npz""" )
        np.savez(tmpfilename ,**voice_preset )
        inputs = processor(text=self.input_string ,voice_preset=tmpfilename )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string ,voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        # boolean kwargs below are reconstructed from the upstream Bark processor test
        encoded_tok = tokenizer(
            self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=False ,return_attention_mask=True ,return_token_type_ids=False ,)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
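# Hedged sketch for running this suite (the test-file path is an assumption):
#   python -m pytest tests/models/bark/test_processor_bark.py -q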
| 345 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( property ):
    '''simple docstring'''
    def __get__( self , obj , objtype=None ):
        """simple docstring"""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool( val ):
    """simple docstring"""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""" )
def is_tensor( x ):
    """simple docstring"""
    if is_torch_fx_proxy(x ):
        return True
    if is_torch_available():
        import torch
        if isinstance(x , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(x , np.ndarray )
def _is_numpy( x ):
    """simple docstring"""
    return isinstance(x , np.ndarray )
def is_numpy_array( x ):
    """simple docstring"""
    return _is_numpy(x )
def _is_torch( x ):
    """simple docstring"""
    import torch
    return isinstance(x , torch.Tensor )
def is_torch_tensor( x ):
    """simple docstring"""
    return False if not is_torch_available() else _is_torch(x )
def _is_torch_device( x ):
    """simple docstring"""
    import torch
    return isinstance(x , torch.device )
def is_torch_device( x ):
    """simple docstring"""
    return False if not is_torch_available() else _is_torch_device(x )
def _is_torch_dtype( x ):
    """simple docstring"""
    import torch
    if isinstance(x , str ):
        # strings such as "float32" are resolved against the torch module
        if not hasattr(torch , x ):
            return False
        return isinstance(getattr(torch , x ) , torch.dtype )
    return isinstance(x , torch.dtype )
def is_torch_dtype( x ):
    """simple docstring"""
    return False if not is_torch_available() else _is_torch_dtype(x )
def _is_tensorflow( x ):
    """simple docstring"""
    import tensorflow as tf
    return isinstance(x , tf.Tensor )
def is_tf_tensor( x ):
    """simple docstring"""
    return False if not is_tf_available() else _is_tensorflow(x )
def _is_tf_symbolic_tensor( x ):
    """simple docstring"""
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(x )
    return type(x ) == tf.Tensor
def is_tf_symbolic_tensor( x ):
    """simple docstring"""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x )
def _is_jax( x ):
    """simple docstring"""
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x , jnp.ndarray )
def is_jax_tensor( x ):
    """simple docstring"""
    return False if not is_flax_available() else _is_jax(x )
def to_py_obj( obj ):
    """simple docstring"""
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy( obj ):
    """simple docstring"""
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class UpperCAmelCase_ ( OrderedDict ):
    '''simple docstring'''
    def __post_init__( self ):
        """simple docstring"""
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def setdefault( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def pop( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def update( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , k ):
        """simple docstring"""
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        """simple docstring"""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        """simple docstring"""
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
        """simple docstring"""
        return tuple(self[k] for k in self.keys() )
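# Hedged usage sketch for the ModelOutput-style container above (upstream this class
# is `ModelOutput`; the `SampleOutput` subclass below is hypothetical and assumes
# `from dataclasses import dataclass`):
#   @dataclass
#   class SampleOutput(UpperCAmelCase_):
#       logits: np.ndarray = None
#   out = SampleOutput(logits=np.zeros((1, 2)))
#   assert out["logits"] is out.logits  # key access and attribute access stay in sync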
class UpperCAmelCase_ ( str, Enum ):
    '''simple docstring'''
    @classmethod
    def _missing_( cls , value ):
        """simple docstring"""
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = "longest"
__UpperCamelCase : List[Any] = "max_length"
__UpperCamelCase : List[str] = "do_not_pad"
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = "pt"
__UpperCamelCase : str = "tf"
__UpperCamelCase : int = "np"
__UpperCamelCase : Union[str, Any] = "jax"
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__( self , context_managers ):
        """simple docstring"""
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        """simple docstring"""
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        """simple docstring"""
        self.stack.__exit__(*args , **kwargs )
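# Hedged usage sketch for the ContextManagers-style helper above (upstream name:
# `ContextManagers`; here the class is the most recent `UpperCAmelCase_` binding):
#   with UpperCAmelCase_([tempfile.TemporaryDirectory(), tempfile.TemporaryDirectory()]):
#       pass  # both contexts are entered on the way in and closed together on the way out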
def can_return_loss( model_class ):
    """simple docstring"""
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels( model_class ):
    """simple docstring"""
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    """simple docstring"""
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir : bool = False ):
    """simple docstring"""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose( array , axes=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(array )}.""" )
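# Hedged sketch of the framework-agnostic transpose above (NumPy path only):
#   import numpy as np
#   transpose(np.zeros((2, 3))).shape  # -> (3, 2)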
def reshape( array , newshape ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(array )}.""" )
def squeeze( array , axis=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(array )}.""" )
def expand_dims( array , axis ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(array )}.""" )
def tensor_size( array ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for tensor_size: {type(array )}.""" )
def add_model_info_to_auto_map( auto_map , repo_id ):
    """simple docstring"""
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""
    return auto_map
def infer_framework( model_class ):
    """simple docstring"""
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(F"""Could not infer framework from class {model_class}.""" )
| 352 |
def get_data( source_data : list[list[float]] ) -> list[list[float]]:
    """simple docstring"""
    data_lists : list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score( data_lists : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """simple docstring"""
    score_lists : list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score : list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores( score_lists : list[list[float]] ) -> list[float]:
    """simple docstring"""
    final_scores : list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity( source_data : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
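# Hedged usage example (weight 0 = prefer smaller values, 1 = prefer larger ones);
# the top-level name `procentual_proximity` is reconstructed from the upstream source:
#   vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   print(procentual_proximity(vehicles, [0, 0, 1]))  # appends a score to each row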
| 315 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path, pytorch_dump_folder_path, classification_head ):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,hidden_size=roberta.cfg.model.encoder_embed_dim,num_hidden_layers=roberta.cfg.model.encoder_layers,num_attention_heads=roberta.cfg.model.encoder_attention_heads,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,max_position_embeddings=514,type_vocab_size=1,layer_norm_eps=1e-5,)
    if classification_head:
        config.num_labels = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our RoBERTa config:""",config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
    # Now let's copy all the weights (assignment targets reconstructed from the
    # upstream conversion script; the right-hand sides come from this file).
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["""mnli"""].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["""mnli"""].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["""mnli"""].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["""mnli"""].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["""mnli"""](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape,their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output,their_output,atol=1e-3 )
    print("""Do both models output the same tensors?""","""🔥""" if success else """💩""" )
    if not success:
        raise Exception("""Something went wRoNg""" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True,exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
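# Hedged CLI sketch (the script filename is an assumption):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl --pytorch_dump_folder_path ./xlm-roberta-xl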
| 26 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowerCAmelCase_ :
"""simple docstring"""
    def __init__(self , poly_a=None , poly_b=None ) -> None:
        """simple docstring"""
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product = self.__multiply()
    def __dft(self , which ):
        """simple docstring"""
        dft = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol )]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self ):
        """simple docstring"""
        dft_a = self.__dft("""A""" )
        dft_b = self.__dft("""B""" )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self ) -> str:
        """simple docstring"""
        a = """A = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = """B = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = """A*B = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
        return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
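# Hedged usage example: multiplying (1 + 2x + 3x^2) by (4 + 5x) with the FFT class above
# (the class keeps its obfuscated name `lowerCAmelCase_` in this dump):
#   fft = lowerCAmelCase_(poly_a=[1, 2, 3], poly_b=[4, 5])
#   print(fft.product)  # coefficients of the degree-3 product polynomial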
| 25 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid : Matrix , row : int , column : int , n : int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location( grid : Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku( grid : Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
return None
def print_solution( grid : Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
SCREAMING_SNAKE_CASE__ : Dict = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
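# Hedged sketch for running this slow integration test (path assumed):
#   RUN_SLOW=1 python -m pytest tests/models/mt5/test_modeling_flax_mt5.py -q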
| 339 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo , pytorch_dump_folder_path ):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
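# Hedged CLI sketch (script filename assumed; the example repo comes from the help text below):
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 --pytorch_dump_folder_path ./out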
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
 | 96 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 3_00
return config
    def check_loss_output( self , result ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": DebertaModel,
            """fill-mask""": DebertaForMaskedLM,
            """question-answering""": DebertaForQuestionAnswering,
            """text-classification""": DebertaForSequenceClassification,
            """token-classification""": DebertaForTokenClassification,
            """zero-shot""": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ):
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @unittest.skip(reason="Model not available yet" )
    def test_inference_masked_lm( self ):
        pass
@slow
    def test_inference_no_head( self ):
        model = DebertaModel.from_pretrained("microsoft/deberta-base" )
        input_ids = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
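# Hedged sketch for running this suite (path assumed):
#   python -m pytest tests/models/deberta/test_modeling_deberta.py -q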
| 241 | 0 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__lowerCAmelCase : Tuple =Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
    return F"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2( TestCasePlus ):
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks( self , output_dir ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check( self , stage: str , model: str , eval_steps: int = 10 , distributed: bool = True , fp16: bool = True , quality_checks: bool = True , ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
def UpperCAmelCase_ ( self :Dict , lowercase_ :str , lowercase_ :str , lowercase_ :int = 10 , lowercase_ :int = 1 , lowercase_ :bool = True , lowercase_ :bool = True , )-> int:
A__ = self.get_auto_remove_tmp_dir("./xxx" , after=lowercase_ )
A__ = F"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowercase_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A__ = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
A__ = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
A__ = self.get_launcher(lowercase_ )
A__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase_ , env=self.get_env() )
return output_dir
def UpperCAmelCase_ ( self :Tuple , lowercase_ :int=False )-> str:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
A__ = min(2 , get_gpu_count() ) if distributed else 1
return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 123 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar on every process except the local main one
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
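# Hedged usage sketch (added for illustration; not part of the original module):
# iterate with a bar that renders only on the local main process. `range(5)`
# stands in for a real workload such as a dataloader.
if __name__ == "__main__":
    for _ in tqdm(True, range(5), desc="demo"):
        pass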
| 123 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily find the minimum change from ascending denominations for the given value."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answers" array

    return answer
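
# Illustrative check (added; not from the original script): with ascending
# denominations [1, 2, 5, 10], the greedy walk makes 27 as 10 + 10 + 5 + 2.
assert find_minimum_change([1, 2, 5, 10], "27") == [10, 10, 5, 2]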
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 274 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
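
# Hedged sanity sketch (added; `_check_make_linear_from_emb` is illustrative,
# not part of the conversion script): the projection built from an embedding
# reuses that embedding's weight tensor, so the shapes must agree.
def _check_make_linear_from_emb():
    emb = nn.Embedding(8, 4)
    lin = make_linear_from_emb(emb)
    assert lin.weight.data.shape == emb.weight.shape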
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 274 | 1 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 354 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
        has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True,
        input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size,
            hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
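
# Hedged illustration (added; `_example_default_config` is not part of the
# source file): with the defaults above, a bare construction matches the base
# checkpoint's text-branch sizes.
def _example_default_config():
    config = LayoutLMv3Config()
    assert config.hidden_size == 768 and config.patch_size == 16
    return config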
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
    def atol_for_validation(self) -> float:
"""simple docstring"""
return 1e-5
@property
    def default_onnx_opset(self) -> int:
"""simple docstring"""
return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
return inputs
| 248 | 0 |
"""simple docstring"""
def logical_left_shift(number: int, shift_amount: int) -> str:
    """
    Take in 2 positive integers: 'number' is logically left shifted
    'shift_amount' times; returns the shifted binary representation.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """
    Take in 2 positive integers: 'number' is logically right shifted
    'shift_amount' times; returns the shifted binary representation.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """
    Take in 2 integers: 'number' is arithmetically right shifted
    'shift_amount' times, preserving the sign bit in two's complement.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
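
# Worked examples (added): 12 is 0b1100, so shifting twice gives "0b110000"
# (logical left) and "0b11" (logical right); -4 sign-extends to "0b1110".
assert logical_left_shift(12, 2) == "0b110000"
assert logical_right_shift(12, 2) == "0b11"
assert arithmetic_right_shift(-4, 1) == "0b1110"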
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self, backbone_config=None, hidden_size=512, initializer_range=0.02,
        pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1,
        auxiliary_concat_input=False, loss_ignore_index=255, **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 30 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
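
# Worked check (added): for n = 10 the square of the sum is 55**2 = 3025 and
# the sum of squares is 385, so the difference is 2640.
assert solution(10) == 2640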
if __name__ == "__main__":
    print(f"{solution() = }")
| 128 |
"""simple docstring"""
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 128 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex
    number made from this x-y-pair diverges. Members of the Mandelbrot set
    do not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white color-coding that ignores the relative distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Create an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
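
# Quick check (added): the origin never escapes, so the escape loop runs to
# completion and the normalized distance is exactly 1.0.
assert get_distance(0, 0, 50) == 1.0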
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """This section tests that the linked list can hold arbitrary data types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 200 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048,
        decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_target_positions=1024, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
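
# Hedged usage sketch (added; `_example_tiny_config` is not part of the source
# file): build a small decoder config; any value not overridden falls back to
# the defaults above.
def _example_tiny_config() -> Speech2Text2Config:
    return Speech2Text2Config(vocab_size=128, d_model=64, decoder_layers=2)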
| 368 |
import argparse
import struct
import unittest
class SHA256:
    """Pure-Python SHA-256 implementation."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0X6_a_0_9_e_6_6_7,
0Xb_b_6_7_a_e_8_5,
0X3_c_6_e_f_3_7_2,
0Xa_5_4_f_f_5_3_a,
0X5_1_0_e_5_2_7_f,
0X9_b_0_5_6_8_8_c,
0X1_f_8_3_d_9_a_b,
0X5_b_e_0_c_d_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_a_2_f_9_8,
0X7_1_3_7_4_4_9_1,
0Xb_5_c_0_f_b_c_f,
0Xe_9_b_5_d_b_a_5,
0X3_9_5_6_c_2_5_b,
0X5_9_f_1_1_1_f_1,
0X9_2_3_f_8_2_a_4,
0Xa_b_1_c_5_e_d_5,
0Xd_8_0_7_a_a_9_8,
0X1_2_8_3_5_b_0_1,
0X2_4_3_1_8_5_b_e,
0X5_5_0_c_7_d_c_3,
0X7_2_b_e_5_d_7_4,
0X8_0_d_e_b_1_f_e,
0X9_b_d_c_0_6_a_7,
0Xc_1_9_b_f_1_7_4,
0Xe_4_9_b_6_9_c_1,
0Xe_f_b_e_4_7_8_6,
0X0_f_c_1_9_d_c_6,
0X2_4_0_c_a_1_c_c,
0X2_d_e_9_2_c_6_f,
0X4_a_7_4_8_4_a_a,
0X5_c_b_0_a_9_d_c,
0X7_6_f_9_8_8_d_a,
0X9_8_3_e_5_1_5_2,
0Xa_8_3_1_c_6_6_d,
0Xb_0_0_3_2_7_c_8,
0Xb_f_5_9_7_f_c_7,
0Xc_6_e_0_0_b_f_3,
0Xd_5_a_7_9_1_4_7,
0X0_6_c_a_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_b_7_0_a_8_5,
0X2_e_1_b_2_1_3_8,
0X4_d_2_c_6_d_f_c,
0X5_3_3_8_0_d_1_3,
0X6_5_0_a_7_3_5_4,
0X7_6_6_a_0_a_b_b,
0X8_1_c_2_c_9_2_e,
0X9_2_7_2_2_c_8_5,
0Xa_2_b_f_e_8_a_1,
0Xa_8_1_a_6_6_4_b,
0Xc_2_4_b_8_b_7_0,
0Xc_7_6_c_5_1_a_3,
0Xd_1_9_2_e_8_1_9,
0Xd_6_9_9_0_6_2_4,
0Xf_4_0_e_3_5_8_5,
0X1_0_6_a_a_0_7_0,
0X1_9_a_4_c_1_1_6,
0X1_e_3_7_6_c_0_8,
0X2_7_4_8_7_7_4_c,
0X3_4_b_0_b_c_b_5,
0X3_9_1_c_0_c_b_3,
0X4_e_d_8_a_a_4_a,
0X5_b_9_c_c_a_4_f,
0X6_8_2_e_6_f_f_3,
0X7_4_8_f_8_2_e_e,
0X7_8_a_5_6_3_6_f,
0X8_4_c_8_7_8_1_4,
0X8_c_c_7_0_2_0_8,
0X9_0_b_e_f_f_f_a,
0Xa_4_5_0_6_c_e_b,
0Xb_e_f_9_a_3_f_7,
0Xc_6_7_1_7_8_f_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Basic regression test against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
'''simple docstring'''
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
    main()
| 282 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass
        using `x` as input. Under the hood we track all operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 131 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            # restore the fairseq-style short suffixes to full parameter names
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 259 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
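# Note: with the _LazyModule assignment above, the heavy torch/TF/Flax submodules are only
# imported the first time one of the names registered in `_import_structure` is accessed.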
| 47 |
import copy
import re
class TrialShortNamer:
    PREFIX = 'hp'
    DEFAULTS = {}
    NAMING_INFO = None
    @classmethod
    def set_defaults(cls , prefix , defaults ):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info , word ):
        if len(word ) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word ):
            raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(word ) + 1 ):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer ):
                s = ""
                while integer != 0:
                    s = chr(ord("A" ) + integer % 10 ) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i )
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info , param_name ):
        words = param_name.split("_" )
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts )
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name(info , param_name ):
        short_name = TrialShortNamer.shortname_for_key(info , param_name )
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls ):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info , k )
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls , params ):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f'You should provide a default value for the param name {k} with value {v}' )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v , bool ):
                v = 1 if v else 0
            sep = "" if isinstance(v , (int, float) ) else "-"
            name.append(f'{key}{sep}{v}' )
        return "_".join(name )
    @classmethod
    def parse_repr(cls , repr ):
        repr = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_" )
        parameters = {}
        for value in values:
            if "-" in value:
                p_k , p_v = value.split("-" )
            else:
                p_k = re.sub("[0-9.]" , "" , value )
                p_v = float(re.sub("[^0-9.]" , "" , value ) )
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
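# Usage sketch (hypothetical subclass; names and values are illustrative, not from the original file):
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
# RunNamer.shortname({"learning_rate": 1e-3, "batch_size": 16})  # -> "run_bs16"
# RunNamer.parse_repr("run_bs16")  # -> {"batch_size": 16.0, "learning_rate": 0.001}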
| 47 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
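# Example mirroring the docstring above (needs `pip install git+https://github.com/hendrycks/math.git`):
# metric = datasets.load_metric("competition_math")
# metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])  # -> {'accuracy': 1.0}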
| 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class CLIPFeatureExtractor( CLIPImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
A__ : Tuple = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''tapas'''
def __init__( self : Optional[int] , __a : List[Any]=30522 , __a : int=768 , __a : int=12 , __a : Tuple=12 , __a : List[Any]=3072 , __a : str="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=1024 , __a : int=[3, 256, 256, 2, 256, 256, 10] , __a : Dict=0.0_2 , __a : int=1e-12 , __a : str=0 , __a : Dict=1_0.0 , __a : Tuple=0 , __a : Dict=1.0 , __a : str=None , __a : List[Any]=1.0 , __a : Union[str, Any]=False , __a : Any=None , __a : Optional[Any]=1.0 , __a : Dict=1.0 , __a : Dict=False , __a : List[str]=False , __a : List[Any]="ratio" , __a : Tuple=None , __a : str=None , __a : Dict=64 , __a : str=32 , __a : List[Any]=False , __a : int=True , __a : List[str]=False , __a : Any=False , __a : Dict=True , __a : Dict=False , __a : int=None , __a : Optional[int]=None , **__a : List[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__snake_case : Optional[int] = vocab_size
__snake_case : str = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : str = intermediate_size
__snake_case : Tuple = hidden_dropout_prob
__snake_case : List[str] = attention_probs_dropout_prob
__snake_case : Dict = max_position_embeddings
__snake_case : List[str] = type_vocab_sizes
__snake_case : str = initializer_range
__snake_case : Tuple = layer_norm_eps
# Fine-tuning task hyperparameters
__snake_case : Optional[Any] = positive_label_weight
__snake_case : Dict = num_aggregation_labels
__snake_case : Optional[int] = aggregation_loss_weight
__snake_case : List[str] = use_answer_as_supervision
__snake_case : int = answer_loss_importance
__snake_case : List[Any] = use_normalized_answer_loss
__snake_case : Tuple = huber_loss_delta
__snake_case : Union[str, Any] = temperature
__snake_case : Optional[Any] = aggregation_temperature
__snake_case : Tuple = use_gumbel_for_cells
__snake_case : List[str] = use_gumbel_for_aggregation
__snake_case : str = average_approximation_function
__snake_case : Optional[int] = cell_selection_preference
__snake_case : Union[str, Any] = answer_loss_cutoff
__snake_case : Dict = max_num_rows
__snake_case : int = max_num_columns
__snake_case : Optional[Any] = average_logits_per_cell
__snake_case : Any = select_one_column
__snake_case : Any = allow_empty_column_selection
__snake_case : str = init_cell_selection_weights_to_zero
__snake_case : Union[str, Any] = reset_position_index_per_cell
__snake_case : Optional[Any] = disable_per_token_loss
# Aggregation hyperparameters
__snake_case : Any = aggregation_labels
__snake_case : int = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
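# Minimal instantiation sketch (illustrative; assumes the canonical `TapasConfig` class name and the
# usual 4-way aggregation head used for WTQ-style fine-tuning):
# config = TapasConfig(num_aggregation_labels=4, aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"})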
| 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Tuple = pytest.mark.integration
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
        __snake_case : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
__snake_case : Dict = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
__snake_case : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__snake_case , __snake_case : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
__snake_case : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case : Any = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
__snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__snake_case : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=__a )
__snake_case , __snake_case : str = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : str ) -> int:
'''simple docstring'''
import faiss
__snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__snake_case : Dict = np.zeros(5 , dtype=np.floataa )
__snake_case : List[str] = 1
__snake_case , __snake_case : List[Any] = index.search(__a )
self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__snake_case : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__snake_case , __snake_case : Dict = index.search_batch(__a )
self.assertRaises(__a , index.search_batch , queries[0] )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __a )
def A_ ( self : int ) -> int:
'''simple docstring'''
import faiss
__snake_case : int = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__snake_case : List[str] = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__a ):
__snake_case : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
import faiss
__snake_case : Tuple = faiss.IndexFlat(5 )
__snake_case : List[Any] = FaissIndex(custom_index=__a )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
import faiss
__snake_case : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
index.save(tmp_file.name )
__snake_case : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__snake_case : List[Any] = np.zeros(5 , dtype=np.floataa )
__snake_case : Any = 1
__snake_case , __snake_case : int = index.search(__a )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a_ ( _UpperCAmelCase : str ) -> Optional[int]:
import faiss
__snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
__snake_case : Dict = 'index.faiss'
__snake_case : Any = f'''mock://{index_name}'''
index.save(_UpperCAmelCase ,storage_options=mockfs.storage_options )
__snake_case : Any = FaissIndex.load(_UpperCAmelCase ,storage_options=mockfs.storage_options )
__snake_case : Any = np.zeros(5 ,dtype=np.floataa )
__snake_case : Any = 1
__snake_case , __snake_case : Tuple = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case : int = Elasticsearch()
__snake_case : Dict = {'acknowledged': True}
__snake_case : List[Any] = ElasticSearchIndex(es_client=__a )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__snake_case : Optional[Any] = 'foo'
__snake_case : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case : List[Any] = index.search(__a )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__snake_case : Dict = 'foo'
__snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case : Optional[Any] = index.search(__a , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__snake_case : List[Any] = ['foo', 'bar', 'foobar']
__snake_case : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case : Any = index.search_batch(__a )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
# batched queries with timeout
__snake_case : Tuple = ['foo', 'bar', 'foobar']
__snake_case : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case : int = index.search_batch(__a , request_timeout=30 )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
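# Standalone sketch of the FaissIndex round-trip the tests above exercise (illustrative only):
# import faiss, numpy as np
# from datasets.search import FaissIndex
# index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# index.add_vectors(np.eye(5, dtype=np.float32))
# scores, indices = index.search(np.ones(5, dtype=np.float32))  # best-scoring row comes back first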
| 0 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
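# Note: the guards above make `import datasets` fail fast on Python < 3.7 or pyarrow < 8.0.0,
# instead of surfacing a harder-to-read error deep inside an Arrow call later on.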
| 100 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
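# Typical call implied by `__call__` above (sketch; the checkpoint id is an assumption):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=image, text="a photo", return_tensors="pt")  # pixel_values + text encodings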
| 100 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__( self , value : int | None = None ):
        self.value = value
        self.prior = random()
        self.left : Node | None = None
        self.right : Node | None = None
def __repr__( self : Union[str, Any] ):
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Tuple ):
        value = str(self.value ) + " "
        left = str(self.left or "" )
        right = str(self.right or "" )
        return value + left + right
def split( root : Node | None , value : int ) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge( left : Node | None , right : Node | None ) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root : Node | None , value : int ) -> Node | None:
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root : Node | None , value : int ) -> Node | None:
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
def inorder( root : Node | None ) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def interact_treap( root : Node | None , args : str ) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("goodbye!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
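# Quick sanity check of the reconstructed API above (expected output shown in comments):
# root = None
# for v in (5, 3, 8, 1):
#     root = insert(root, v)
# inorder(root)         # prints: 1,3,5,8,
# root = erase(root, 3)
# inorder(root)         # prints: 1,5,8,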
| 209 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE :Optional[int] = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 'mask2former'
_SCREAMING_SNAKE_CASE = ['swin']
_SCREAMING_SNAKE_CASE = {'hidden_size': 'hidden_dim'}
def __init__( self : Optional[Any] , _lowerCAmelCase : Optional[Dict] = None , _lowerCAmelCase : int = 2_5_6 , _lowerCAmelCase : int = 2_5_6 , _lowerCAmelCase : int = 2_5_6 , _lowerCAmelCase : int = 1_0_2_4 , _lowerCAmelCase : str = "relu" , _lowerCAmelCase : int = 6 , _lowerCAmelCase : int = 1_0 , _lowerCAmelCase : int = 8 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : int = 2_0_4_8 , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 2_5_5 , _lowerCAmelCase : int = 1_0_0 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 2.0 , _lowerCAmelCase : float = 5.0 , _lowerCAmelCase : float = 5.0 , _lowerCAmelCase : int = 1_2_5_4_4 , _lowerCAmelCase : float = 3.0 , _lowerCAmelCase : float = 0.75 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : bool = True , _lowerCAmelCase : List[int] = [4, 8, 1_6, 3_2] , _lowerCAmelCase : bool = None , **_lowerCAmelCase : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
snake_case_ = CONFIG_MAPPING['''swin'''](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__A , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__A , __A ):
snake_case_ = backbone_config.pop("model_type" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
snake_case_ = backbone_config
snake_case_ = feature_size
snake_case_ = mask_feature_size
snake_case_ = hidden_dim
snake_case_ = encoder_feedforward_dim
snake_case_ = activation_function
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = num_attention_heads
snake_case_ = dropout
snake_case_ = dim_feedforward
snake_case_ = pre_norm
snake_case_ = enforce_input_projection
snake_case_ = common_stride
snake_case_ = ignore_value
snake_case_ = num_queries
snake_case_ = no_object_weight
snake_case_ = class_weight
snake_case_ = mask_weight
snake_case_ = dice_weight
snake_case_ = train_num_points
snake_case_ = oversample_ratio
snake_case_ = importance_sample_ratio
snake_case_ = init_std
snake_case_ = init_xavier_std
snake_case_ = use_auxiliary_loss
snake_case_ = feature_strides
snake_case_ = output_auxiliary_logits
snake_case_ = decoder_layers
super().__init__(**__A )
@classmethod
def lowerCAmelCase__ ( cls : str , _lowerCAmelCase : PretrainedConfig , **_lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
return cls(
backbone_config=__A , **__A , )
def lowerCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
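# Sketch (assuming the canonical `Mask2FormerConfig` name for the class above):
# config = Mask2FormerConfig()  # falls back to the default Swin backbone configured in __init__
# the classmethod above then rebuilds a config from an existing backbone config plus keyword overrides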
| 159 |
def partition( m : int ) -> int:
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
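# Reading of the DP above (annotation, not part of the original script): memo[n][k] counts
# partitions of n with parts bounded by k under the 1-offset indexing used here, via
# memo[n][k] = memo[n][k - 1] + memo[n - k - 1][k]; for example, partition(3) == 3
# ({3}, {2, 1}, {1, 1, 1}).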
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 0 |
'''simple docstring'''
def to_upper_case( word : str ) -> str:
    return "".join(chr(ord(char ) - 32 ) if '''a''' <= char <= '''z''' else char for char in word )
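# e.g. to_upper_case("hello World") == "HELLO WORLD"; subtracting 32 maps ASCII 'a'-'z'
# onto 'A'-'Z' and leaves every other character unchanged.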
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 228 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def normalize_box( box , width , height ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
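# e.g. normalize_box([10, 20, 110, 220], width=200, height=400) -> [50, 50, 550, 550];
# coordinates are rescaled onto the fixed 0-1000 grid that LayoutLM-style models expect.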
def apply_tesseract( image , lang , tesseract_config ):
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = ["pixel_values"]
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 2_55 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = "" , **lowerCamelCase_ , ) -> None:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_value
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase__ = apply_ocr
lowerCAmelCase__ = ocr_lang
lowerCAmelCase__ = tesseract_config
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCAmelCase__ = (size['''height'''], size['''width'''])
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_=None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase__ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase__ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase__ = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(lowerCamelCase_ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for image in images:
lowerCAmelCase__ , lowerCAmelCase__ = apply_tesseract(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
words_batch.append(lowerCamelCase_ )
boxes_batch.append(lowerCamelCase_ )
if do_resize:
lowerCAmelCase__ = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowerCamelCase_ )
if apply_ocr:
lowerCAmelCase__ = words_batch
lowerCAmelCase__ = boxes_batch
        return data
| 228 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a_ ( _lowerCAmelCase ) -> int:
__lowerCamelCase : Optional[int] = 384
__lowerCamelCase : List[str] = 7
if "tiny" in model_name:
__lowerCamelCase : List[str] = 96
__lowerCamelCase : Optional[int] = (2, 2, 6, 2)
__lowerCamelCase : List[str] = (3, 6, 12, 24)
elif "small" in model_name:
__lowerCamelCase : str = 96
__lowerCamelCase : Tuple = (2, 2, 18, 2)
__lowerCamelCase : List[str] = (3, 6, 12, 24)
elif "base" in model_name:
__lowerCamelCase : Tuple = 128
__lowerCamelCase : int = (2, 2, 18, 2)
__lowerCamelCase : Any = (4, 8, 16, 32)
__lowerCamelCase : List[str] = 12
__lowerCamelCase : Optional[Any] = 512
elif "large" in model_name:
__lowerCamelCase : Union[str, Any] = 192
__lowerCamelCase : List[Any] = (2, 2, 18, 2)
__lowerCamelCase : Tuple = (6, 12, 24, 48)
__lowerCamelCase : Optional[Any] = 12
__lowerCamelCase : List[str] = 768
# set label information
__lowerCamelCase : Union[str, Any] = 150
__lowerCamelCase : List[Any] = 'huggingface/label-files'
__lowerCamelCase : Optional[int] = 'ade20k-id2label.json'
__lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCAmelCase ,_lowerCAmelCase ,repo_type='dataset' ) ,'r' ) )
__lowerCamelCase : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__lowerCamelCase : str = {v: k for k, v in idalabel.items()}
__lowerCamelCase : Optional[Any] = SwinConfig(
embed_dim=_lowerCAmelCase ,depths=_lowerCAmelCase ,num_heads=_lowerCAmelCase ,window_size=_lowerCAmelCase ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
__lowerCamelCase : Dict = UperNetConfig(
backbone_config=_lowerCAmelCase ,auxiliary_in_channels=_lowerCAmelCase ,num_labels=_lowerCAmelCase ,idalabel=_lowerCAmelCase ,labelaid=_lowerCAmelCase ,)
return config
def a_ ( _lowerCAmelCase ) -> Union[str, Any]:
__lowerCamelCase : List[str] = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.stages.{i}.downsample.reduction.weight', F'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.weight', F'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.bias', F'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> List[str]:
__lowerCamelCase : Dict = dct.pop(_lowerCAmelCase )
__lowerCamelCase : List[Any] = val
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Optional[Any]:
__lowerCamelCase : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCamelCase : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCamelCase : Optional[int] = state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
__lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase : Union[str, Any] = in_proj_weight[:dim, :]
__lowerCamelCase : str = in_proj_bias[: dim]
__lowerCamelCase : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
__lowerCamelCase : List[str] = in_proj_bias[
dim : dim * 2
]
__lowerCamelCase : Optional[Any] = in_proj_weight[
-dim :, :
]
__lowerCamelCase : str = in_proj_bias[-dim :]
# fmt: on
def correct_unfold_reduction_order(x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order(x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
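# My reading of the four helpers above: Swin's patch merging packs the 4 neighbouring patches
# in checkpoint order [0, 1, 2, 3] while the HF layout expects [0, 2, 1, 3], so the rows of 2-D
# downsample weights and the entries of 1-D norm parameters are permuted (and un-permuted) here.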
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> Optional[int]:
__lowerCamelCase : Tuple = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
__lowerCamelCase : Optional[Any] = model_name_to_url[model_name]
__lowerCamelCase : str = torch.hub.load_state_dict_from_url(_lowerCAmelCase ,map_location='cpu' ,file_name=_lowerCAmelCase )[
'state_dict'
]
for name, param in state_dict.items():
print(_lowerCAmelCase ,param.shape )
__lowerCamelCase : str = get_upernet_config(_lowerCAmelCase )
__lowerCamelCase : int = UperNetForSemanticSegmentation(_lowerCAmelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__lowerCamelCase : str = state_dict.pop(_lowerCAmelCase )
if "bn" in key:
__lowerCamelCase : List[str] = key.replace('bn' ,'batch_norm' )
__lowerCamelCase : Any = val
# rename keys
__lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__lowerCamelCase : int = reverse_correct_unfold_reduction_order(_lowerCAmelCase )
if "norm" in key:
__lowerCamelCase : Optional[Any] = reverse_correct_unfold_norm_order(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# verify on image
__lowerCamelCase : int = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
__lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCAmelCase ,stream=_lowerCAmelCase ).raw ).convert('RGB' )
__lowerCamelCase : str = SegformerImageProcessor()
__lowerCamelCase : List[Any] = processor(_lowerCAmelCase ,return_tensors='pt' ).pixel_values
with torch.no_grad():
__lowerCamelCase : Dict = model(_lowerCAmelCase )
__lowerCamelCase : Tuple = outputs.logits
print(logits.shape )
print('First values of logits:' ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__lowerCamelCase : Union[str, Any] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
__lowerCamelCase : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
__lowerCamelCase : Dict = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
__lowerCamelCase : List[Any] = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f'''upernet-swin-{size}''' for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 208 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Optional[int]:
if "xprophetnet" in prophetnet_checkpoint_path:
__lowerCamelCase : Union[str, Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
__lowerCamelCase ,__lowerCamelCase : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase ,output_loading_info=_lowerCAmelCase )
else:
__lowerCamelCase : Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
__lowerCamelCase ,__lowerCamelCase : List[str] = ProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase ,output_loading_info=_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = ['key_proj', 'value_proj', 'query_proj']
__lowerCamelCase : Optional[Any] = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
__lowerCamelCase : Optional[int] = key.split('.' )
if attributes[0] == "lm_head":
__lowerCamelCase : Dict = prophet
__lowerCamelCase : List[Any] = prophet_old
else:
__lowerCamelCase : Any = prophet.prophetnet
__lowerCamelCase : Any = prophet_old.model
__lowerCamelCase : Optional[Any] = False
for attribute in attributes:
if attribute in mapping:
__lowerCamelCase : Any = mapping[attribute]
if not hasattr(_lowerCAmelCase ,_lowerCAmelCase ) and len(_lowerCAmelCase ) > 0:
__lowerCamelCase : int = attribute
elif hasattr(_lowerCAmelCase ,_lowerCAmelCase ):
__lowerCamelCase : Optional[int] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowerCamelCase : List[Any] = old_model.weight
logger.info(F'{attribute} is initialized.' )
__lowerCamelCase : List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowerCamelCase : List[Any] = old_model.bias
logger.info(F'{attribute} is initialized' )
__lowerCamelCase : Dict = True
break
elif attribute in special_keys and hasattr(_lowerCAmelCase ,'in_proj_weight' ):
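                # the fused in_proj matrix stacks query, key and value row-wise; the thirds
                # ([:embed_dim], [embed_dim:2*embed_dim], [2*embed_dim:]) are sliced apart below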
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase : str = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase : Optional[int] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowerCamelCase : Dict = True
break
if attribute.isdigit():
__lowerCamelCase : List[str] = model[int(_lowerCAmelCase )]
__lowerCamelCase : Union[str, Any] = old_model[int(_lowerCAmelCase )]
else:
__lowerCamelCase : Union[str, Any] = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if old_attribute == "":
__lowerCamelCase : str = old_model
else:
if not hasattr(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
__lowerCamelCase : str = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 208 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
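# Quick illustration (ours, not part of the original test file): PEP 604's `|` syntax
# and the classic `typing` spellings compare equal at runtime.
if is_python_no_less_than_3_10:
    assert (int | None) == Optional[int]  # both mean "an int or None"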
def list_field(default=None , metadata=None):
    return field(default_factory=lambda: default , metadata=metadata)
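# Usage sketch: `names: List[str] = list_field(default=["a", "b"])` gives every dataclass
# instance its own fresh list; dataclasses require mutable defaults to go through
# `default_factory`, which is why the helper wraps `default` in a lambda.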
@dataclass
class snake_case_ :
__A : int
__A : float
__A : str
__A : bool
@dataclass
class snake_case_ :
__A : int = 42
__A : str = field(default="toto" ,metadata={"help": "help message"} )
@dataclass
class snake_case_ :
__A : bool = False
__A : bool = True
__A : Optional[bool] = None
class snake_case_ ( Enum ):
__A : str = "titi"
__A : List[str] = "toto"
class snake_case_ ( Enum ):
__A : Optional[int] = "titi"
__A : Union[str, Any] = "toto"
__A : str = 42
@dataclass
class snake_case_ :
__A : BasicEnum = "toto"
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ : str = BasicEnum(self.foo )
@dataclass
class snake_case_ :
__A : MixedTypeEnum = "toto"
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : int = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
__A : Optional[int] = None
__A : Optional[float] = field(default=__A ,metadata={"help": "help message"} )
__A : Optional[str] = None
__A : Optional[List[str]] = list_field(default=[] )
__A : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
__A : List[int] = list_field(default=[] )
__A : List[int] = list_field(default=[1, 2, 3] )
__A : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
__A : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
__A : List[int] = field()
__A : str = field()
__A : BasicEnum = field()
def __UpperCamelCase ( self : Any ) -> int:
lowercase__ : int = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
__A : int
__A : "BasicEnum" = field()
__A : "Optional[bool]" = None
__A : "str" = field(default="toto" ,metadata={"help": "help message"} )
__A : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
__A : bool = False
__A : bool = True
__A : bool | None = None
@dataclass
class snake_case_ :
__A : int | None = None
__A : float | None = field(default=__A ,metadata={"help": "help message"} )
__A : str | None = None
__A : list[str] | None = list_field(default=[] )
__A : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : int , lowercase_ : argparse.ArgumentParser , lowercase_ : argparse.ArgumentParser ) -> Tuple:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowercase__ : Dict = {k: v for k, v in vars(lowercase_ ).items() if k != "container"}
lowercase__ : List[str] = {k: v for k, v in vars(lowercase_ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , lowercase_ ) and yy.get("choices" , lowercase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](lowercase_ ) , yy["type"](lowercase_ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
lowercase__ : Any = HfArgumentParser(lowercase_ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--bar" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--baz" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--flag" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example ,) = parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_ )
self.assertFalse(example.flag )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Optional[Any] = HfArgumentParser(lowercase_ )
lowercase__ : List[str] = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=lowercase_ )
expected.add_argument("--baz" , default="toto" , type=lowercase_ , help="help message" )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> Dict:
lowercase__ : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
expected.add_argument("--baz" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=lowercase_ , dest="baz" )
expected.add_argument("--opt" , type=lowercase_ , default=lowercase_ )
lowercase__ : Union[str, Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase__ : str = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Tuple = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Union[str, Any] = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Dict = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Optional[int] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
lowercase__ : List[Any] = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase__ : Any = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : int = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase__ : Dict = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowercase__ : Dict = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
@dataclass
class snake_case_ :
__A : Literal["titi", "toto", 42] = "toto"
lowercase__ : Union[str, Any] = HfArgumentParser(lowercase_ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : Tuple = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
lowercase__ : int = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=lowercase_ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=lowercase_ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase_ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_args([] )
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase__ : List[Any] = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __UpperCamelCase ( self : List[Any] ) -> int:
lowercase__ : Any = argparse.ArgumentParser()
expected.add_argument("--foo" , default=lowercase_ , type=lowercase_ )
expected.add_argument("--bar" , default=lowercase_ , type=lowercase_ , help="help message" )
expected.add_argument("--baz" , default=lowercase_ , type=lowercase_ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=lowercase_ )
expected.add_argument("--des" , nargs="+" , default=[] , type=lowercase_ )
lowercase__ : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase__ : Any = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) )
lowercase__ : Dict = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowercase__ : List[str] = HfArgumentParser(lowercase_ )
lowercase__ : Any = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--required_str" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : Optional[int] = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase_ , )
expected.add_argument("--opt" , type=lowercase_ , default=lowercase_ )
expected.add_argument("--baz" , default="toto" , type=lowercase_ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Dict ) -> str:
lowercase__ : str = HfArgumentParser(lowercase_ )
lowercase__ : List[Any] = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowercase__ : Optional[Any] = parser.parse_dict(lowercase_ )[0]
lowercase__ : Dict = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : List[Any] = HfArgumentParser(lowercase_ )
lowercase__ : Any = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
lowercase__ : Tuple = HfArgumentParser(lowercase_ )
lowercase__ : int = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Any = os.path.join(lowercase_ , "temp_json" )
os.mkdir(lowercase_ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowercase__ : Optional[Any] = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str ) -> int:
lowercase__ : Tuple = HfArgumentParser(lowercase_ )
lowercase__ : Dict = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[int] = os.path.join(lowercase_ , "temp_yaml" )
os.mkdir(lowercase_ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowercase__ : int = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : List[str] = HfArgumentParser(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 363 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
        lowercase__ : Union[str, Any] = {int(k ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
        state_dict.pop(k , None)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
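        # e.g. "refinenet1" maps to "fusion_stage.layers.3" because abs(1 - 4) == 3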
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
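        # in_proj_weight stacks the three projections: rows [0:hidden_size] hold the query,
        # [hidden_size:2*hidden_size] the key, and [2*hidden_size:] the value weights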
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 333 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
A: List[str] = {"bert_for_seq_generation": 5_1_2}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<::::>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Dict = vocab_file
UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = self.__dict__.copy()
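        # SentencePieceProcessor objects cannot be pickled, so the loaded model is
        # dropped here and re-created from `vocab_file` in __setstate__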
UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : List[Any] = {}
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
return token
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Any = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
UpperCAmelCase : List[str] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : int = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
UpperCAmelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 109 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
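    # Replacing this module in sys.modules with a _LazyModule means the heavy torch/vision
    # submodules declared in _import_structure are only imported on first attribute access.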
| 93 | 0 |
'''simple docstring'''
def solution(max_base: int = 10 ,max_power: int = 22 ) -> int:
    bases = range(1 ,max_base )
    powers = range(1 ,max_power )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
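# e.g. 9 ** 5 == 59049 is a 5-digit fifth power and is counted, while no base >= 10
# can ever qualify, since 10 ** n already has n + 1 digits.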
if __name__ == "__main__":
print(F"""{solution(1_0, 2_2) = }""")
| 360 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix ,row: int ,column: int ,n: int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
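    # (row - row % 3, column - column % 3) is the top-left cell of the 3x3 box
    # containing (row, column); the two loops below scan that box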
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
        if is_safe(grid ,row ,column ,digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
return grid
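            # the tentative digit led to a dead end: clear the cell before trying the next one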
            grid[row][column] = 0
return None
def print_solution(grid: Matrix ) -> None:
for row in grid:
for cell in row:
            print(cell ,end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0 | 0 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] ,x: float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float] ,x: float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
return result
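# Horner evaluates c0 + c1*x + ... + cn*x**n as (...(cn*x + c(n-1))*x + ...)*x + c0,
# needing only n multiplications and n additions instead of recomputing every power of x;
# for the example below, both evaluate_poly and horner return 79800.0.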
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 71 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
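# The tests below compare generated pixels against hard-coded slices, so every source of
# nondeterminism (nondeterministic kernels, cudnn autotuning) is disabled before any model runs.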
class __A ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =StableDiffusionDiffEditPipeline
UpperCamelCase__ : str =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
UpperCamelCase__ : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
UpperCamelCase__ : Dict =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ : Any =frozenset([] )
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Dict =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , )
__UpperCamelCase : List[str] =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
__UpperCamelCase : Union[str, Any] =DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_zero=lowerCamelCase__ , )
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCamelCase : Tuple =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__UpperCamelCase : Any =CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : int =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : Union[str, Any] ={
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : int =floats_tensor((1, 16, 16) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : Any =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Any =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : Optional[int] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Dict ={
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Tuple =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : Optional[Any] =Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : List[Any] =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : Any =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Optional[int] ={
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : str =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : int =Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Any =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : int =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Optional[int] ={
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
if not hasattr(self.pipeline_class , '_optional_components' ):
return
__UpperCamelCase : Optional[Any] =self.get_dummy_components()
__UpperCamelCase : List[str] =self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__UpperCamelCase : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : List[Any] =pipe(**lowerCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : Tuple =self.pipeline_class.from_pretrained(lowerCamelCase__ )
pipe_loaded.to(lowerCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase__ , lowerCamelCase__ ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
__UpperCamelCase : str =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =pipe_loaded(**lowerCamelCase__ )[0]
__UpperCamelCase : Tuple =np.abs(output - output_loaded ).max()
self.assertLess(lowerCamelCase__ , 1E-4 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any ='cpu'
__UpperCamelCase : Union[str, Any] =self.get_dummy_components()
__UpperCamelCase : Any =self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : int =self.get_dummy_mask_inputs(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =pipe.generate_mask(**lowerCamelCase__ )
__UpperCamelCase : int =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__UpperCamelCase : Tuple =np.array([0] * 9 )
__UpperCamelCase : str =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int ='cpu'
__UpperCamelCase : Union[str, Any] =self.get_dummy_components()
__UpperCamelCase : Optional[Any] =self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Dict =self.get_dummy_inversion_inputs(lowerCamelCase__ )
__UpperCamelCase : List[Any] =pipe.invert(**lowerCamelCase__ ).images
__UpperCamelCase : Optional[Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__UpperCamelCase : List[str] =np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
__UpperCamelCase : int =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ , 1E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] ='cpu'
__UpperCamelCase : int =self.get_dummy_components()
__UpperCamelCase : str ={'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
__UpperCamelCase : str =DPMSolverMultistepScheduler(**lowerCamelCase__ )
__UpperCamelCase : Dict =DPMSolverMultistepInverseScheduler(**lowerCamelCase__ )
__UpperCamelCase : Any =self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Tuple =self.get_dummy_inversion_inputs(lowerCamelCase__ )
__UpperCamelCase : str =pipe.invert(**lowerCamelCase__ ).images
__UpperCamelCase : List[Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__UpperCamelCase : List[str] =np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
__UpperCamelCase : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ , 1E-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowercase ( cls ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
__UpperCamelCase : Union[str, Any] =raw_image.convert('RGB' ).resize((768, 768) )
__UpperCamelCase : List[Any] =raw_image
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : Dict =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
__UpperCamelCase : List[str] =DDIMScheduler.from_config(pipe.scheduler.config )
__UpperCamelCase : List[str] =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : List[str] ='a bowl of fruit'
__UpperCamelCase : Dict ='a bowl of pears'
__UpperCamelCase : Tuple =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCamelCase__ , target_prompt=lowerCamelCase__ , generator=lowerCamelCase__ , )
__UpperCamelCase : int =pipe.invert(
prompt=lowerCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCamelCase__ ).latents
__UpperCamelCase : Dict =pipe(
prompt=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_latents=lowerCamelCase__ , generator=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
__UpperCamelCase : str =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =torch.manual_seed(0 )
__UpperCamelCase : List[Any] =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
__UpperCamelCase : Optional[Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__UpperCamelCase : Optional[int] =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] ='a bowl of fruit'
__UpperCamelCase : int ='a bowl of pears'
__UpperCamelCase : str =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCamelCase__ , target_prompt=lowerCamelCase__ , generator=lowerCamelCase__ , )
__UpperCamelCase : List[str] =pipe.invert(
prompt=lowerCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCamelCase__ , num_inference_steps=25 , ).latents
__UpperCamelCase : List[str] =pipe(
prompt=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_latents=lowerCamelCase__ , generator=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
__UpperCamelCase : Tuple =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 71 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
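# Weight conversion only copies and slices tensors, so autograd tracking is disabled
# globally to avoid building graphs while the state dict is manipulated below.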
def __lowerCAmelCase ( lowercase : str , lowercase : str=False ) -> Optional[int]:
"""simple docstring"""
snake_case : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __lowerCAmelCase ( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[Any]=False ) -> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
snake_case : Tuple = ""
else:
snake_case : Optional[int] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case : Tuple = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
snake_case : Optional[int] = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case : List[str] = in_proj_weight[
: config.hidden_size, :
]
snake_case : str = in_proj_bias[: config.hidden_size]
snake_case : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case : Any = in_proj_weight[
-config.hidden_size :, :
]
snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase ( lowercase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
        state_dict.pop(k , None )
def __lowerCAmelCase ( lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case : List[str] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def __lowerCAmelCase ( lowercase : Tuple , lowercase : Any , lowercase : Any ) -> Optional[int]:
"""simple docstring"""
snake_case : List[Any] = dct.pop(lowercase )
snake_case : Dict = val
def __lowerCAmelCase ( lowercase : Tuple , lowercase : Tuple ) -> List[str]:
"""simple docstring"""
snake_case : Optional[int] = ViTMSNConfig()
snake_case : Any = 1000
snake_case : Any = "datasets/huggingface/label-files"
snake_case : List[str] = "imagenet-1k-id2label.json"
snake_case : Optional[int] = json.load(open(hf_hub_download(lowercase , lowercase ) , "r" ) )
    snake_case : Any = {int(k ): v for k, v in idalabel.items()}
snake_case : List[Any] = idalabel
snake_case : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
snake_case : Union[str, Any] = 384
snake_case : List[Any] = 1536
snake_case : Optional[Any] = 6
elif "l16" in checkpoint_url:
snake_case : List[Any] = 1024
snake_case : Optional[int] = 4096
snake_case : Union[str, Any] = 24
snake_case : Dict = 16
snake_case : Optional[int] = 0.1
elif "b4" in checkpoint_url:
snake_case : Dict = 4
elif "l7" in checkpoint_url:
snake_case : Union[str, Any] = 7
snake_case : str = 1024
snake_case : Dict = 4096
snake_case : Optional[int] = 24
snake_case : Optional[int] = 16
snake_case : Any = 0.1
snake_case : List[Any] = ViTMSNModel(lowercase )
snake_case : int = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" )["target_encoder"]
snake_case : Dict = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowercase )
snake_case : List[Any] = create_rename_keys(lowercase , base_model=lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_q_k_v(lowercase , lowercase , base_model=lowercase )
model.load_state_dict(lowercase )
model.eval()
snake_case : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case : Dict = Image.open(requests.get(lowercase , stream=lowercase ).raw )
    snake_case : int = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
snake_case : Optional[int] = image_processor(images=lowercase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
snake_case : Optional[Any] = model(**lowercase )
snake_case : str = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
snake_case : str = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
snake_case : Dict = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
snake_case : Union[str, Any] = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
snake_case : int = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
snake_case : Optional[int] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowercase , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 112 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""ConditionalDetrFeatureExtractor"""]
__snake_case = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
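

# --- Added note (not in the original file): with the `_LazyModule` wiring above,
# `import transformers.models.conditional_detr` stays cheap; the heavy submodules
# only load on first attribute access. Hedged usage sketch:
#
#   from transformers.models.conditional_detr import ConditionalDetrConfig
#   config = ConditionalDetrConfig()  # triggers the lazy import of the config submodule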
| 112 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
"""simple docstring"""
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 17 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
        - \'matthews_correlation\': Matthews correlation coefficient
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds, labels):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
| 38 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """simple docstring"""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """simple docstring"""
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))

    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())

    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'the value at {value} is {summ}')
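

def demo_fixed_data() -> None:
    """Added sketch (not in the original file): the same forward-difference
    interpolation on fixed data instead of input(); values are illustrative."""
    x = [0, 1, 2, 3]
    y0 = [1.0, 2.0, 5.0, 10.0]  # f(x) = x^2 + 1 sampled at x
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i in range(n):
        y[i][0] = y0[i]
    value = 1.5
    u = (value - x[0]) / (x[1] - x[0])
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')  # ~3.25 for f(x) = x^2 + 1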
if __name__ == "__main__":
main() | 368 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type, use_small=False):
    """simple docstring"""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'])
def _download(from_hf_hub, file_name):
    """simple docstring"""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
"""simple docstring"""
if model_type == "text":
__magic_name__ : Tuple = BarkSemanticModel
__magic_name__ : Optional[int] = BarkSemanticConfig
__magic_name__ : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
__magic_name__ : List[str] = BarkCoarseModel
__magic_name__ : Dict = BarkCoarseConfig
__magic_name__ : Tuple = BarkCoarseGenerationConfig
elif model_type == "fine":
__magic_name__ : Optional[Any] = BarkFineModel
__magic_name__ : Dict = BarkFineConfig
__magic_name__ : Tuple = BarkFineGenerationConfig
else:
raise NotImplementedError()
    model_key = f'{model_type}_small' if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.')
        _download(model_info['repo_id'], model_info['file_name'])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head')
    model_args['hidden_size'] = model_args.pop('n_embd')
    model_args['num_layers'] = model_args.pop('n_layer')

    model_config = ConfigClass(**checkpoint['model_args'])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
# fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')}
    if len(extra_keys) != 0:
        raise ValueError(f'extra keys found: {extra_keys}')
    if len(missing_keys) != 0:
        raise ValueError(f'missing keys: {missing_keys}')
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f'model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss')
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, 'cpu', model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model['model']

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters')
# check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codeboook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codeboook_channel, vec)
        output_old_model = bark_model(prediction_codeboook_channel, vec)
        output_new_model = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """simple docstring"""
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json'))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json'))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json'))
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz')

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz')

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
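

# --- Added sketch (not in the original script): toy illustration of the
# `_orig_mod.` prefix-stripping / layer renaming performed in `_load_model` above.
def _demo_rename_keys():
    toy_state_dict = {'_orig_mod.transformer.h.0.ln_1.weight': 0}
    unwanted_prefix = '_orig_mod.'
    for k in list(toy_state_dict):
        if k.startswith(unwanted_prefix):
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            toy_state_dict[new_k] = toy_state_dict.pop(k)
    return toy_state_dict  # {'layers.0.layernorm_1.weight': 0}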
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small) | 275 | 0 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows) -> None:
        '''simple docstring'''
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float.")
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        '''simple docstring'''
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
    def num_rows(self) -> int:
        '''simple docstring'''
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        '''simple docstring'''
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        '''simple docstring'''
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        '''simple docstring'''
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        '''simple docstring'''
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
        '''simple docstring'''
        return bool(self.determinant())

    def get_minor(self, row, column) -> int:
        '''simple docstring'''
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column) -> int:
        '''simple docstring'''
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        '''simple docstring'''
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self) -> Matrix:
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        '''simple docstring'''
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        '''simple docstring'''
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
'''simple docstring'''
return str(self.rows )
def __str__( self ) -> str:
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(lowerCAmelCase__ ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row, position=None) -> None:
        '''simple docstring'''
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None) -> None:
        '''simple docstring'''
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other) -> bool:
        '''simple docstring'''
        if not isinstance(other, Matrix):
return NotImplemented
return self.rows == other.rows
    def __ne__(self, other) -> bool:
'''simple docstring'''
return not self == other
def __neg__( self ) -> Matrix:
'''simple docstring'''
return self * -1
    def __add__(self, other) -> Matrix:
'''simple docstring'''
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other) -> Matrix:
'''simple docstring'''
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other) -> Matrix:
        '''simple docstring'''
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")
    def __pow__(self, other) -> Matrix:
'''simple docstring'''
        if not isinstance(other, int):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product(cls, row, column) -> int:
        '''simple docstring'''
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
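
    # Added usage sketch (not in the original file): exercise the Matrix class
    # defined above; the sample values are illustrative.
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())  # -2
    print(m.inverse())      # adjugate scaled by 1 / determinant
    print(m * m)            # matrix product via Matrix.dot_product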
| 95 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1_000):
        '''simple docstring'''
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        '''simple docstring'''
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        '''simple docstring'''
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A )
    def test_batch_sampler_shards_with_splits(self):
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[[0, 1]], []]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
    def test_batch_sampler_shards_with_splits_no_even(self):
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[[0, 1]], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
    def test_batch_sampler_with_varying_batch_size(self):
        '''simple docstring'''
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        '''simple docstring'''
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
        self.assertTrue(len(first_list) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        '''simple docstring'''
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        '''simple docstring'''
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        '''simple docstring'''
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        '''simple docstring'''
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        '''simple docstring'''
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
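

# --- Added sketch (not in the original tests): what `BatchSamplerShard` yields for
# two processes; mirrors the expected values asserted in the tests above.
def _demo_batch_sampler_shard():
    batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
    # shard 0 -> [[0, 1, 2], [6, 7, 8], ...]; shard 1 -> [[3, 4, 5], [9, 10, 11], ...]
    return [list(shard) for shard in shards]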
| 161 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTester(unittest.TestCase):
    def test_set_level(self):
        '''simple docstring'''
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        '''simple docstring'''
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY='error' )
    def test_env_override(self):
        '''simple docstring'''
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}',
        )

        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def test_env_invalid_override(self):
        '''simple docstring'''
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)
        # no need to restore as nothing was changed
# no need to restore as nothing was changed
    def test_advisory_warnings(self):
        '''simple docstring'''
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')
def test_set_progress_bar_enabled():
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
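

# --- Added sketch (not in the original tests): typical verbosity control with the
# API exercised above.
def _demo_verbosity():
    level_origin = logging.get_verbosity()
    logging.set_verbosity_error()  # silence warnings from all `transformers.*` loggers
    logging.set_verbosity(level_origin)  # restore the previous level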
| 365 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        '''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        assert tok('sam').input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text], padding=False, truncation=True)['input_ids']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        '''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text)['input_ids']
        encoded_dot = tok(src_text_dot)['input_ids']
        assert encoded[-1] == encoded_dot[0]
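

# --- Added sketch (not in the original tests): round-tripping text through the
# 90M checkpoint's tokenizer, mirroring the behaviour asserted above.
def _demo_blenderbot_small_tokenizer():
    tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
    ids = tok('I am a small frog.').input_ids
    # decoding lowercases and re-spaces punctuation: 'i am a small frog .'
    return tok.decode(ids, skip_special_tokens=True)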
| 308 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase_ )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config(**UpperCamelCase_ )
UpperCamelCase__ = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
UpperCamelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
UpperCamelCase__ = scheduler_class.from_pretrained(UpperCamelCase_ )
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
UpperCamelCase__ = dummy_past_residuals[:]
UpperCamelCase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCamelCase__ = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase_ )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
UpperCamelCase__ = scheduler_class.from_pretrained(UpperCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ = dummy_past_residuals[:]
UpperCamelCase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCamelCase__ = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(**UpperCamelCase_ )
UpperCamelCase__ = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ = 10
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCamelCase__ = model(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCamelCase__ = model(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def UpperCAmelCase_ (self ):
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase_ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase_ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase_ , """set_timesteps""" ):
UpperCamelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase__ = dummy_past_residuals[:]
UpperCamelCase__ = scheduler.step_prk(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
UpperCamelCase__ = scheduler.step_prk(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase__ = scheduler.step_plms(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
UpperCamelCase__ = scheduler.step_plms(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ (self ):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def UpperCAmelCase_ (self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__ = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def UpperCAmelCase_ (self ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def UpperCAmelCase_ (self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def UpperCAmelCase_ (self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def UpperCAmelCase_ (self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCamelCase_ )
def UpperCAmelCase_ (self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=UpperCamelCase_ )
def UpperCAmelCase_ (self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCamelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCamelCase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
def UpperCAmelCase_ (self ):
with self.assertRaises(UpperCamelCase_ ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**UpperCamelCase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.full_loop()
UpperCamelCase__ = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.full_loop(prediction_type="""v_prediction""" )
UpperCamelCase__ = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def UpperCAmelCase_ (self ):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase__ = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def UpperCAmelCase_ (self ):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase__ = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
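# A standalone sketch of the save/load round-trip the tests above rely on,
# assuming the diffusers config API (save_config / from_pretrained). Names are
# illustrative; this helper is not part of the test suite and must be called manually.
def _roundtrip_sketch():
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(10)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = PNDMScheduler.from_pretrained(tmpdirname)
    reloaded.set_timesteps(10)
    assert torch.equal(scheduler.timesteps, reloaded.timesteps)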
| 244 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
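# A small usage sketch: `column_mapping` is what `datasets.Dataset.prepare_for_task`
# uses to rename user columns to the schema above (the column names here are illustrative).
template = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
assert template.column_mapping == {"q": "question", "ctx": "context", "ans": "answers"}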
| 249 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"git diff --name-only {fork_point_sha}".split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(RF"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
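# Illustrative check of the directory filter above (hypothetical file names,
# as if the script had been invoked with the sub-dirs "src" and "tests"):
_demo_regex = re.compile(R"^(src|tests).*?\.py$")
assert _demo_regex.match("src/transformers/modeling_utils.py")
assert not _demo_regex.match("docs/source/index.mdx")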
| 63 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase__ = {F"funnel-transformer/{name}": 512 for name in _model_names}
lowerCamelCase__ = {F"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
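# Funnel gives the <cls> token its own token type id (2) rather than 0; a quick
# standalone illustration mirroring create_token_type_ids_from_sequences (ids hypothetical):
_cls, _sep = [3], [4]
_token_ids_0 = [10, 11, 12]
assert len(_cls) * [2] + len(_token_ids_0 + _sep) * [0] == [2, 0, 0, 0, 0]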
| 63 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_padding_different_model_input_name(self):
        """simple docstring"""
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """simple docstring"""
    pass
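# Standalone check of the toy BPE ids asserted in test_full_tokenizer above:
# "lower" splits into ["low", "er</w>"], which map to ids 14 and 15; "<unk>" is 20.
_toy_vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>']
assert [_toy_vocab.index(t) for t in ('low', 'er</w>', '<unk>')] == [14, 15, 20]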
| 71 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
A_ :List[str] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
A_ :Any = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
A_ :Tuple = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
A_ :List[str] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
A_ :Tuple = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """simple docstring"""
        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k) -> np.ndarray:
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
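# Worked example of the unbiased pass@k estimator above: with n=5 samples of
# which c=2 pass, pass@1 = 1 - (3/4)(4/5) = 0.4; an all-correct problem gives 1.0.
# Illustrative helper, not part of the metric; call it manually.
def _pass_at_k_example():
    scores = estimate_pass_at_k(np.array([5, 5]), np.array([2, 5]), 1)
    assert abs(scores[0] - 0.4) < 1e-9 and scores[1] == 1.0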
| 71 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict) -> dict:
    '''simple docstring'''
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_pattern = R'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_to_block_pattern, key):
            new_key = re.sub(R'layers_(\d+)', R'block/\1/layer', new_key)
        encoder_decoder_pattern = R'(encoder|decoder)\/'
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'/mlp/', R'/1/mlp/', new_key)
                new_key = re.sub(R'/pre_mlp_layer_norm/', R'/1/layer_norm/', new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R'/mlp/', R'/2/mlp/', new_key)
                new_key = re.sub(R'/pre_mlp_layer_norm/', R'/2/layer_norm/', new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(F'''{key} -> {new_key}''')
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", F"experts/expert_{idx}/")] = expert_weights[idx]
                print(F"{key} -> {key.replace('expert/', F'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts) -> SwitchTransformersConfig:
    '''simple docstring'''
    import regex as re
    with open(gin_file, 'r') as f:
        raw_gin = f.read()
    regex_match = re.findall(R'(.*) = ([0-9.]*)', raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '.' in value else int(value)
    activation = re.findall(R'(.*activations) = \(\'(.*)\',\)', raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args['num_experts'] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8) -> None:
    '''simple docstring'''
    print(F'''Loading flax weights from : {flax_checkpoint_path}''')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params, sep='/')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep='/')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
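# A quick illustration of the layer-renaming patterns applied by rename_keys
# above, on a hypothetical t5x-style key (illustrative only; call manually):
def _rename_demo():
    key = re.sub(R"layers_(\d+)", R"block/\1/layer", "encoder/layers_3/mlp/wi/kernel")
    key = re.sub(R"/mlp/", R"/1/mlp/", key)  # encoder branch
    assert key == "encoder/block/3/layer/1/mlp/wi/kernel"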
| 356 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f'''distilbert.embeddings.{w}.weight'''] = state_dict[f'''{prefix}.embeddings.{w}.weight''']
    for w in ["weight", "bias"]:
        compressed_sd[f'''distilbert.embeddings.LayerNorm.{w}'''] = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        for w in ["weight", "bias"]:
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
            ]
        std_idx += 1
    compressed_sd['''vocab_projector.weight'''] = state_dict['''cls.predictions.decoder.weight''']
    compressed_sd['''vocab_projector.bias'''] = state_dict['''cls.predictions.bias''']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f'''vocab_transform.{w}'''] = state_dict[f'''cls.predictions.transform.dense.{w}''']
            compressed_sd[f'''vocab_layer_norm.{w}'''] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
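# The teacher-to-student layer mapping implied above, shown in isolation:
# teacher layers [0, 2, 4, 7, 9, 11] become student layers 0..5 (illustrative check).
_layer_map = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
assert _layer_map[11] == 5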
| 185 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)
    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""")
    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""")
class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
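# Usage sketch for the queue above (capacity 2 for brevity; illustrative, call manually):
def _queue_demo():
    queue = CircularQueueLinkedList(2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"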
| 155 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=64 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = vocab_size - 1
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self ) -> int:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self , a , a , a ) -> Optional[int]:
snake_case_ = GPTNeoXModel(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
snake_case_ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a ) -> List[str]:
snake_case_ = True
snake_case_ = GPTNeoXModel(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> int:
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> Optional[int]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForQuestionAnswering(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , a , a , a , a ) -> List[str]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForSequenceClassification(a )
model.to(a )
model.eval()
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , a , a , a , a ) -> str:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForTokenClassification(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , a , a , a ) -> List[Any]:
snake_case_ = True
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
snake_case_ = model(a , attention_mask=a , use_cache=a )
snake_case_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ = model(a , attention_mask=a , output_hidden_states=a )
snake_case_ = output_from_no_past['hidden_states'][0]
snake_case_ = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _UpperCamelCase ( self ) -> str:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCamelCase ( self ) -> str:
snake_case_ = GPTNeoXModelTester(self )
snake_case_ = ConfigTester(self , config_class=a , hidden_size=64 , num_attention_heads=8 )
def _UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> str:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def _UpperCamelCase ( self ) -> List[str]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _UpperCamelCase ( self , a ) -> int:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ids_tensor([1, 10] , config.vocab_size )
snake_case_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = GPTNeoXModel(a )
original_model.to(a )
original_model.eval()
snake_case_ = original_model(a ).last_hidden_state
snake_case_ = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = {'type': scaling_type, 'factor': 10.0}
snake_case_ = GPTNeoXModel(a )
scaled_model.to(a )
scaled_model.eval()
snake_case_ = scaled_model(a ).last_hidden_state
snake_case_ = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
snake_case_ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(a )
snake_case_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(a )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case_ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
snake_case_ = model.generate(**a , do_sample=a , max_new_tokens=20 )
snake_case_ = tokenizer.batch_decode(a )[0]
self.assertEqual(a , a )
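# A sketch of the rope_scaling knob the parameterized test above exercises,
# assuming the transformers GPTNeoXConfig API (config-only, no weights involved):
def _rope_scaling_sketch():
    for scaling_type in ['linear', 'dynamic']:
        cfg = GPTNeoXConfig(rope_scaling={'type': scaling_type, 'factor': 10.0})
        assert cfg.rope_scaling['factor'] == 10.0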
| 178 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None) -> Any:
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
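# Quick illustration of the helper above: a seeded rng gives a reproducible
# nested list with the requested 2-D shape, values in [0, scale). Illustrative, call manually.
def _floats_list_sketch():
    demo = floats_list((2, 3), rng=random.Random(0))
    assert len(demo) == 2 and all(len(row) == 3 for row in demo)
    assert all(0.0 <= v < 1.0 for row in demo for v in row)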
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""")
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""").select(range(num_samples))[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="""pt""").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1E-4))
| 359 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix( matrix ) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus' rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""")
        # Creating the cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""")
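# Usage sketch (illustrative; the function and local names above were
# reconstructed from context): det([[2, 5], [1, 3]]) = 2*3 - 5*1 = 1, so the
# inverse is the swapped/negated matrix divided by 1.
print(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]))  # [[3.0, -5.0], [-1.0, 2.0]]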
| 293 | 0 |
def is_arithmetic_series( series ) -> bool:
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non-empty list""" )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean( series ) -> float:
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non-empty list""" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
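# Quick usage sketch (illustrative; the two function names above were
# reconstructed so they no longer shadow each other):
print(is_arithmetic_series([2, 4, 6]))  # True  (common difference 2)
print(arithmetic_mean([2, 4, 6]))       # 4.0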
| 20 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( lowerCAmelCase ):
_a : Union[str, Any]= "microsoft/speecht5_tts"
_a : Tuple= (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_a : Dict= "text_reader"
_a : Optional[Any]= SpeechTaProcessor
_a : Tuple= SpeechTaForTextToSpeech
_a : Optional[int]= SpeechTaHifiGan
_a : Union[str, Any]= ["text"]
_a : Optional[int]= ["audio"]
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.post_processor is None:
self.post_processor = """microsoft/speecht5_hifigan"""
super().setup()
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : int = self.pre_processor(text=snake_case ,return_tensors="""pt""" ,truncation=snake_case )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" ,split="""validation""" )
speaker_embeddings = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(snake_case ).cpu().detach()
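# Hypothetical usage sketch (the tool class name is obfuscated above, and
# running it downloads checkpoints, so this is left as comments):
# tool = TextToSpeechTool()        # assumed name; wraps microsoft/speecht5_tts
# waveform = tool("Hello world")   # encode -> forward -> decode on call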
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
def allocation_num( number_of_bytes : int , partitions : int ):
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
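# Example (illustrative; the name allocation_num was reconstructed above):
# the last partition absorbs any remainder bytes.
print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']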
| 314 |
"""simple docstring"""
def sylvester( number : int ):
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 314 | 1 |
def decimal_isolate( number: float , digit_amount: int ):
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
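# For reference: decimal_isolate(1.53, 0) -> 0.53 (plain fractional part),
# while decimal_isolate(35.345, 1) -> 0.3 (fractional part rounded to 1 digit).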
| 30 |
'''simple docstring'''
from math import sqrt
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase__ : int = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase__ : Optional[Any] = False
for divisor in range(2 , int(round(sqrt(UpperCamelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase__ : Any = False
break
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'status' must been from type bool"
return status
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase__ : List[str] = list(range(2 , n + 1 ) )
lowerCAmelCase__ : str = [] # this list will be returns.
# actual sieve of Eratosthenes
for i in range(len(UpperCamelCase ) ):
for j in range(i + 1 , len(UpperCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase__ : List[Any] = 0
# filters actual prime numbers.
lowerCAmelCase__ : List[Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase__ : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(UpperCamelCase ):
ans.append(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase__ : Optional[Any] = [] # this list will be returns of the function.
# potential prime number factors.
lowerCAmelCase__ : Dict = 2
lowerCAmelCase__ : Dict = number
if number == 0 or number == 1:
ans.append(UpperCamelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(UpperCamelCase ):
while quotient != 1:
if is_prime(UpperCamelCase ) and (quotient % factor == 0):
ans.append(UpperCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase__ : Optional[int] = 0
# prime factorization of 'number'
lowerCAmelCase__ : List[str] = prime_factorization(UpperCamelCase )
lowerCAmelCase__ : Any = max(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type int"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase__ : List[Any] = 0
# prime factorization of 'number'
lowerCAmelCase__ : List[str] = prime_factorization(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = min(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type int"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , UpperCamelCase ), "compare must been from type bool"
return number % 2 == 0
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , UpperCamelCase ), "compare bust been from type bool"
return number % 2 != 0
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase , UpperCamelCase ) and (number > 2) and is_even(UpperCamelCase )
), "'number' must been an int, even and > 2"
lowerCAmelCase__ : Dict = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase__ : Dict = get_prime_numbers(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = len(UpperCamelCase )
# run variable for while-loops.
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = None
# exit variable. for break up the loops
lowerCAmelCase__ : Any = True
while i < len_pn and loop:
lowerCAmelCase__ : List[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase__ : Optional[Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and (len(UpperCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
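# Example (illustrative): for 30 the scan above returns [7, 23] -- the first
# pair i < j with prime_numbers[i] + prime_numbers[j] == 30 (2 + 28, 3 + 27
# and 5 + 25 all fail because the second summand is not prime).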
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and isinstance(UpperCamelCase , UpperCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase__ : int = 0
while numbera != 0:
lowerCAmelCase__ : Any = numbera % numbera
lowerCAmelCase__ : str = numbera
lowerCAmelCase__ : List[str] = rest
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
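# Example (illustrative): the Euclidean loop above computes gcd(48, 36) = 12,
# via 48 % 36 = 12 and then 36 % 12 = 0.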
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and isinstance(UpperCamelCase , UpperCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase__ : int = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase__ : int = prime_factorization(UpperCamelCase )
lowerCAmelCase__ : Any = prime_factorization(UpperCamelCase )
elif numbera == 1 or numbera == 1:
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = max(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase__ : int = prime_fac_a.count(UpperCamelCase )
lowerCAmelCase__ : Any = prime_fac_a.count(UpperCamelCase )
for _ in range(max(UpperCamelCase , UpperCamelCase ) ):
ans *= n
else:
lowerCAmelCase__ : Any = prime_fac_a.count(UpperCamelCase )
for _ in range(UpperCamelCase ):
ans *= n
done.append(UpperCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase__ : Optional[int] = prime_fac_a.count(UpperCamelCase )
for _ in range(UpperCamelCase ):
ans *= n
done.append(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
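# Example (illustrative): combining the prime factorisations as above gives
# lcm(24, 36) = 2**3 * 3**2 = 72, since 24 = 2**3 * 3 and 36 = 2**2 * 3**2.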
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Tuple = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(UpperCamelCase ):
ans += 1
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ) and is_prime(
UpperCamelCase ), "'ans' must been a prime number and from type int"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
assert (
is_prime(UpperCamelCase ) and is_prime(UpperCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase__ : Dict = p_number_a + 1 # jump to the next number
lowerCAmelCase__ : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(UpperCamelCase ):
number += 1
while number < p_number_a:
ans.append(UpperCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(UpperCamelCase ):
number += 1
# precondition
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and ans[0] != p_number_a
and ans[len(UpperCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase__ : List[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(UpperCamelCase )
# precondition
assert ans[0] == 1 and ans[len(UpperCamelCase ) - 1] == n, "Error in function getDivisors(...)"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase__ : Optional[int] = get_divisors(UpperCamelCase )
# precondition
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and (divisors[0] == 1)
and (divisors[len(UpperCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and isinstance(UpperCamelCase , UpperCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase__ : int = gcd(abs(UpperCamelCase ) , abs(UpperCamelCase ) )
# precondition
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase__ : str = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Optional[Any] = 1 # this will be return
for _ in range(n - 1 ):
lowerCAmelCase__ : Dict = ans
ans += fiba
lowerCAmelCase__ : str = tmp
return ans
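# Standalone sketch of the prime-sieve idea used earlier in this module
# (illustrative): strike out multiples of each prime up to sqrt(n).
n = 20
flags = [True] * (n + 1)
for p in range(2, int(n ** 0.5) + 1):
    if flags[p]:
        for multiple in range(p * p, n + 1, p):
            flags[multiple] = False
print([q for q in range(2, n + 1) if flags[q]])  # [2, 3, 5, 7, 11, 13, 17, 19]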
| 37 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = '''vit_mae'''
def __init__( self : str , __a : Optional[Any]=768 , __a : Tuple=12 , __a : Any=12 , __a : Optional[Any]=3072 , __a : Union[str, Any]="gelu" , __a : Optional[Any]=0.0 , __a : Optional[Any]=0.0 , __a : str=0.02 , __a : int=1E-12 , __a : int=224 , __a : List[str]=16 , __a : Any=3 , __a : Any=True , __a : Union[str, Any]=16 , __a : int=512 , __a : List[str]=8 , __a : Optional[Any]=2048 , __a : Union[str, Any]=0.75 , __a : Dict=False , **__a : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Optional[Any] = hidden_size
__lowercase : Any = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : Optional[int] = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : str = attention_probs_dropout_prob
__lowercase : List[str] = initializer_range
__lowercase : int = layer_norm_eps
__lowercase : List[str] = image_size
__lowercase : Dict = patch_size
__lowercase : Union[str, Any] = num_channels
__lowercase : Optional[int] = qkv_bias
__lowercase : Union[str, Any] = decoder_num_attention_heads
__lowercase : List[str] = decoder_hidden_size
__lowercase : Optional[int] = decoder_num_hidden_layers
__lowercase : List[str] = decoder_intermediate_size
__lowercase : Optional[Any] = mask_ratio
__lowercase : Any = norm_pix_loss
 | 306 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = parent
__lowercase : List[str] = out_indices if out_indices is not None else [4]
__lowercase : Optional[int] = stage_names
__lowercase : Any = out_features
__lowercase : Optional[Any] = backbone
__lowercase : Optional[Any] = batch_size
__lowercase : Union[str, Any] = image_size
__lowercase : List[str] = num_channels
__lowercase : str = use_pretrained_backbone
__lowercase : str = is_training
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase : str = config_and_inputs
__lowercase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
_A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_A : List[Any] = False
_A : List[str] = False
_A : Any = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = TimmBackboneModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = """resnet18"""
__lowercase : Optional[int] = """microsoft/resnet-18"""
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
__lowercase : Dict = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
__lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
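    # Note (illustrative): out_indices (-1,) and [len(stage_names) - 1] both
    # select the final stage, which is why the channels and out_features of the
    # timm and transformers backbones match in the checks above.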
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : List[str] = [*signature.parameters.keys()]
__lowercase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[Any] = True
__lowercase : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowercase : Union[str, Any] = self.all_model_classes[0]
__lowercase : List[Any] = model_class(__a )
model.to(__a )
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
__lowercase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowercase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
__lowercase : int = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowercase : Any = copy.deepcopy(__a )
__lowercase : Dict = None
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowercase : List[str] = copy.deepcopy(__a )
__lowercase : Optional[Any] = False
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(**__a )
 | 306 | 1 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _snake_case ( UpperCamelCase : Optional[int] ):
UpperCAmelCase : int = int(UpperCamelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = t // 3600, (t // 60) % 60, t % 60
return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}"
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : int=300 ):
# docstyle-ignore
return F"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def _snake_case ( UpperCamelCase : Dict ):
UpperCAmelCase : str = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
UpperCAmelCase : Optional[Any] = F"{elt:.6f}" if isinstance(UpperCamelCase , UpperCamelCase ) else str(UpperCamelCase )
html_code += F" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class SCREAMING_SNAKE_CASE__ :
__lowerCAmelCase : Optional[int] = 5
__lowerCAmelCase : Tuple = 0.2
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 300 , ) -> Dict:
'''simple docstring'''
UpperCAmelCase : str = total
UpperCAmelCase : Optional[int] = """""" if prefix is None else prefix
UpperCAmelCase : Union[str, Any] = leave
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Dict = width
UpperCAmelCase : int = None
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Dict = None
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int = value
if comment is not None:
UpperCAmelCase : Dict = comment
if self.last_value is None:
UpperCAmelCase : List[str] = time.time()
UpperCAmelCase : int = value
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Union[str, Any] = self.warmup
UpperCAmelCase : Any = 1
self.update_bar(_SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
UpperCAmelCase : int = time.time()
UpperCAmelCase : Dict = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
UpperCAmelCase : Any = self.elapsed_time / (value - self.start_value)
else:
UpperCAmelCase : Any = None
if value >= self.total:
UpperCAmelCase : Tuple = self.total
UpperCAmelCase : Dict = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
UpperCAmelCase : Dict = self.average_time_per_item * (self.total - value)
self.update_bar(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = value
UpperCAmelCase : Union[str, Any] = current_time
if self.average_time_per_item is None:
UpperCAmelCase : List[Any] = 1
else:
UpperCAmelCase : Dict = max(int(self.update_every / self.average_time_per_item ) , 1 )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = """ """ * (len(str(self.total ) ) - len(str(_SCREAMING_SNAKE_CASE ) )) + str(_SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
UpperCAmelCase : List[Any] = F"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
UpperCAmelCase : Union[str, Any] = F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"
else:
UpperCAmelCase : Optional[int] = (
F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"
F" {format_time(self.predicted_remaining )}"
)
self.label += F", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F", {self.comment}]"
self.display()
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
UpperCAmelCase : Dict = disp.display(disp.HTML(self.html_code ) , display_id=_SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("""""" ) )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = None if column_names is None else [column_names]
UpperCAmelCase : List[Any] = None
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
UpperCAmelCase : Any = disp.display(disp.HTML(self.html_code ) , display_id=_SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.inner_table is None:
UpperCAmelCase : Tuple = [list(values.keys() ), list(values.values() )]
else:
UpperCAmelCase : str = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=300 ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = NotebookProgressBar(_SCREAMING_SNAKE_CASE , prefix=_SCREAMING_SNAKE_CASE , parent=self , width=_SCREAMING_SNAKE_CASE )
return self.child_bar
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = None
self.display()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Any = None
UpperCAmelCase : Any = False
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : int = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""" )
UpperCAmelCase : int = NotebookTrainingTracker(state.max_steps , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1 , comment=F"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , )
UpperCAmelCase : int = False
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(_SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
UpperCAmelCase : int = self.training_tracker.add_child(len(_SCREAMING_SNAKE_CASE ) )
else:
UpperCAmelCase : Union[str, Any] = NotebookProgressBar(len(_SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
UpperCAmelCase : int = None
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
UpperCAmelCase : List[Any] = {"""Training Loss""": logs["""loss"""]}
# First column is necessarily Step since we're not in epoch eval strategy
UpperCAmelCase : Optional[Any] = state.global_step
self.training_tracker.write_line(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if self.training_tracker is not None:
UpperCAmelCase : Any = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history ):
if "loss" in log:
UpperCAmelCase : Optional[int] = log["""loss"""]
break
if self.first_column == "Epoch":
UpperCAmelCase : Dict = int(state.epoch )
else:
UpperCAmelCase : int = state.global_step
UpperCAmelCase : Union[str, Any] = """eval"""
for k in metrics:
if k.endswith("""_loss""" ):
UpperCAmelCase : Optional[Any] = re.sub(r"""\_loss$""" , """""" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = metrics.pop("""total_flos""" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = metrics.pop("""epoch""" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = metrics.pop(F"{metric_key_prefix}_runtime" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = metrics.pop(F"{metric_key_prefix}_samples_per_second" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = metrics.pop(F"{metric_key_prefix}_steps_per_second" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = metrics.pop(F"{metric_key_prefix}_jit_compilation_time" , _SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F"{metric_key_prefix}_loss":
UpperCAmelCase : Tuple = v
else:
UpperCAmelCase : Union[str, Any] = k.split("""_""" )
UpperCAmelCase : List[str] = """ """.join([part.capitalize() for part in splits[1:]] )
UpperCAmelCase : List[str] = v
self.training_tracker.write_line(_SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
UpperCAmelCase : Union[str, Any] = None
# Evaluation takes a long time so we should force the next update.
UpperCAmelCase : Union[str, Any] = True
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = None
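# Standalone sketch of the time formatting used above (illustrative):
# 3725 s -> '1:02:05', and the hours field is dropped when zero (125 s -> '02:05').
t = 3725
h, m, s = t // 3600, (t // 60) % 60, t % 60
print(f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}")  # 1:02:05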
| 109 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
A: Optional[int] = logging.get_logger(__name__)
A: Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
A: List[str] = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _snake_case ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Any ):
for attribute in key.split(""".""" ):
UpperCAmelCase : Optional[Any] = getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
UpperCAmelCase : List[Any] = getattr(UpperCamelCase , UpperCamelCase ).shape
else:
UpperCAmelCase : str = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : str = value
elif weight_type == "weight_v":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "bias":
UpperCAmelCase : str = value
else:
UpperCAmelCase : Union[str, Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _snake_case ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] ):
UpperCAmelCase : Tuple = []
UpperCAmelCase : Any = fairseq_model.state_dict()
UpperCAmelCase : Tuple = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : str = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase : Dict = True
if "*" in mapped_key:
UpperCAmelCase : str = name.split(UpperCamelCase )[0].split(""".""" )[-2]
UpperCAmelCase : Tuple = mapped_key.replace("""*""" , UpperCamelCase )
if "weight_g" in name:
UpperCAmelCase : Any = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase : Optional[Any] = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : str = """weight"""
else:
UpperCAmelCase : Optional[Any] = None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Any ):
UpperCAmelCase : str = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase : Dict = name.split(""".""" )
UpperCAmelCase : List[str] = int(items[0] )
UpperCAmelCase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCAmelCase : Optional[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCAmelCase : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase )
@torch.no_grad()
def _snake_case ( UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : List[Any]=None ):
# load the pre-trained checkpoints
UpperCAmelCase : List[Any] = torch.load(UpperCamelCase )
UpperCAmelCase : List[str] = WavLMConfigOrig(checkpoint["""cfg"""] )
UpperCAmelCase : Optional[int] = WavLMOrig(UpperCamelCase )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
UpperCAmelCase : List[str] = WavLMConfig.from_pretrained(UpperCamelCase )
else:
UpperCAmelCase : List[Any] = WavLMConfig()
UpperCAmelCase : Any = WavLMModel(UpperCamelCase )
recursively_load_weights(UpperCamelCase , UpperCamelCase )
hf_wavlm.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A: int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A: Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
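# Hypothetical invocation (script name and paths are placeholders; the flags
# come from the argparse setup above):
# python convert_wavlm_checkpoint.py \
#     --checkpoint_path /path/to/WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-converted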
| 109 | 1 |
def prime_sieve_eratosthenes( num ):
    """simple docstring"""
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
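# Example (illustrative): prime_sieve_eratosthenes(30) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].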
| 359 |
from math import ceil, sqrt
def solution( limit = 1_000_000 ):
    """simple docstring"""
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
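# Context note (illustrative): this counts hollow square laminae that can be
# formed with up to `limit` tiles (cf. Project Euler 173).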
| 121 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[int] = logging.get_logger(__name__)
lowercase_ : Union[str, Any] = '▁'
lowercase_ : Optional[Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowercase_ : List[Any] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowercase_ : Optional[int] = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
lowercase_ : Dict = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowercase_ : int = {'mustc': MUSTC_LANGS}
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : List[str] = VOCAB_FILES_NAMES
snake_case_ : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : str = MAX_MODEL_INPUT_SIZES
snake_case_ : List[str] = ["input_ids", "attention_mask"]
snake_case_ : List[int] = []
def __init__( self : List[Any] , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict="<s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[Any]=False , snake_case__ : Optional[Any]=False , snake_case__ : str=None , snake_case__ : List[str]=None , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[Any] , ):
"""simple docstring"""
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , do_upper_case=snake_case__ , do_lower_case=snake_case__ , tgt_lang=snake_case__ , lang_codes=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
_UpperCAmelCase = do_upper_case
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = load_json(snake_case__ )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = spm_file
_UpperCAmelCase = load_spm(snake_case__ , self.sp_model_kwargs )
if lang_codes is not None:
_UpperCAmelCase = lang_codes
_UpperCAmelCase = LANGUAGES[lang_codes]
_UpperCAmelCase = [F"""<lang:{lang}>""" for lang in self.langs]
_UpperCAmelCase = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
_UpperCAmelCase = self.lang_tokens
_UpperCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_UpperCAmelCase = {}
@property
def UpperCamelCase ( self : int ):
"""simple docstring"""
return len(self.encoder )
@property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : str ):
"""simple docstring"""
_UpperCAmelCase = self.lang_code_to_id[tgt_lang]
_UpperCAmelCase = [lang_code_id]
def UpperCamelCase ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : Tuple ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder[self.unk_token] )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : int ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def UpperCamelCase ( self : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_UpperCAmelCase = self.sp_model.decode(snake_case__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_UpperCAmelCase = []
else:
current_sub_tokens.append(snake_case__ )
_UpperCAmelCase = self.sp_model.decode(snake_case__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : Tuple , snake_case__ : Dict ):
"""simple docstring"""
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase ( self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
_UpperCAmelCase = Path(snake_case__ )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
_UpperCAmelCase = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_UpperCAmelCase = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , snake_case__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case__ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case__ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (str(snake_case__ ), str(snake_case__ ))
def load_spm( path , sp_model_kwargs ):
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ):
    '''simple docstring'''
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data , path ):
    '''simple docstring'''
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
| 133 |
def solution( power = 1000 ):
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
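# Worked example (added for illustration): for power=15, n = 2**15 = 32768 and
# the loop sums its decimal digits 3 + 2 + 7 + 6 + 8 = 26, so solution(15)
# returns 26. The classic Project Euler 16 question asks for solution(1000).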
| 133 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 350 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"""This example is {label}""" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
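# --- Illustrative usage (added; not part of the original file) ---
# A minimal sketch, assuming the PipelineTool base class makes instances
# callable so that encode -> forward -> decode run in sequence; the example
# text and labels are made up.
#
#     classifier = TextClassificationTool()
#     classifier.setup()
#     best_label = classifier("This movie was great", ["positive", "negative"])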
| 143 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        })
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 87 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute('''SELECT * FROM dataset''')
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0).write()
| 353 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    '''A weighted, undirected graph.'''

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        '''Add a new edge to the graph, adding its endpoints if necessary.'''
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        '''Run Prim's algorithm to find the minimum spanning tree of the graph.'''
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    '''Return the saving gained by replacing the network in `filename` with its
    minimum spanning tree (Project Euler 107).'''
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split('''\n''')
    adjacency_matrix = [line.split(''',''') for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 177 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the dotted module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"""{test_file} instead.""")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module corresponding to a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes that define a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in a test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a given test class, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes in a test file that cover a given model class."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes used to test a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in a test file to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the model tester classes used to test it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively replace classes in `o` with their names so the result is json-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 6 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peak biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
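# --- Illustrative check (added; not part of the original module) ---
# A minimal sketch of using one of the factories above: a 1 kHz low-pass
# biquad at a 48 kHz sample rate. It assumes IIRFilter exposes a per-sample
# process() method, as in the accompanying audio_filters.iir_filter module.
#
#     lowpass = make_lowpass(1000, 48000)
#     filtered_sample = lowpass.process(0.5)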
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 6 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    '''
    Copy/paste/tweak the original checkpoint's weights into our ProphetNet structure.
    '''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)

    special_keys = ['key_proj', 'value_proj', 'query_proj']

    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split('.')

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight'):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(F"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!")

    print(F"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
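    # Example invocation (added for illustration; the script file name and both
    # paths below are hypothetical):
    #   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
    #       --prophetnet_checkpoint_path ./prophetnet_old \
    #       --pytorch_dump_folder_path ./prophetnet_converted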
| 352 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    '''
    Utility class for storing learned text embeddings for classifier free sampling
    '''

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    '''
    Pipeline for text-to-image generation using VQ Diffusion.
    '''

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt',
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        '''Zero out (set to log(0)) the lowest probabilities whose cumulative mass exceeds `truncation_rate`.'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
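    # Worked example (added for illustration): with per-class probabilities
    # (0.5, 0.3, 0.2) and truncation_rate=0.8, the cumulative sums are
    # (0.5, 0.8, 1.0); after prepending the always-true row and dropping the
    # last row, only the top two classes are kept and the 0.2 class is set to
    # log(0), i.e. -inf.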
| 145 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = 'bart'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                "The config can simply be saved and uploaded again to be fixed.")
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
| 52 |
def partition(m: int) -> int:
    """Count the integer partitions of `m`.

    memo[n][k] holds the number of partitions of n into parts of size at most k + 1.
    """
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for a split and save them to `output_dir` as json."""
    logger.info(F"""***** {split} metrics *****""")
    for key in sorted(metrics.keys()):
        logger.info(F"""  {key} = {metrics[key]}""")
    save_json(metrics, os.path.join(output_dir, F"""{split}_results.json"""))
def _UpperCAmelCase ( ):
"""simple docstring"""
UpperCamelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = parser.parse_args_into_dataclasses()
check_output_dir(lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase : str = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
assert hasattr(lowerCAmelCase__ , lowerCAmelCase__ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowerCAmelCase__ , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowerCAmelCase__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase : int = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowerCAmelCase__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCamelCase : List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowerCAmelCase__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase : str = SeqaSeqDataset
# Get datasets
UpperCamelCase : Any = (
dataset_class(
lowerCAmelCase__ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
UpperCamelCase : Optional[Any] = (
dataset_class(
lowerCAmelCase__ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase : Optional[int] = (
dataset_class(
lowerCAmelCase__ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase : str = (
build_compute_metrics_fn(data_args.task , lowerCAmelCase__ ) if training_args.predict_with_generate else None
)
UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , data_args=lowerCAmelCase__ , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , data_collator=SeqaSeqDataCollator(
lowerCAmelCase__ , lowerCAmelCase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
UpperCamelCase : Optional[int] = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
UpperCamelCase : Union[str, Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase : List[Any] = train_result.metrics
UpperCamelCase : List[str] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , lowerCAmelCase__ , training_args.output_dir )
all_metrics.update(lowerCAmelCase__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCamelCase : int = trainer.evaluate(metric_key_prefix='''val''' )
UpperCamelCase : List[str] = data_args.n_val
UpperCamelCase : Any = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , lowerCAmelCase__ , training_args.output_dir )
all_metrics.update(lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCamelCase : Union[str, Any] = trainer.predict(test_dataset=lowerCAmelCase__ , metric_key_prefix='''test''' )
UpperCamelCase : Tuple = test_output.metrics
UpperCamelCase : List[str] = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase : Union[str, Any] = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , lowerCAmelCase__ , training_args.output_dir )
all_metrics.update(lowerCAmelCase__ )
if training_args.predict_with_generate:
UpperCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
UpperCamelCase : Dict = lmap(str.strip , lowerCAmelCase__ )
write_txt_file(lowerCAmelCase__ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(lowerCAmelCase__ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
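# A minimal, self-contained sketch of the HfArgumentParser pattern driving the
# script above: CLI flags are generated from dataclass fields, and each field's
# metadata["help"] becomes the --help text. The ToyArguments class below is
# hypothetical, for illustration only.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class ToyArguments:
    data_dir: str = field(metadata={"help": "Path to the dataset directory."})
    n_train: int = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})


if __name__ == "__main__":
    toy_parser = HfArgumentParser(ToyArguments)
    (toy_args,) = toy_parser.parse_args_into_dataclasses()
    print(toy_args)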
| 359 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
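# The shim above is a common deprecation pattern: keep the old class importable,
# emit a FutureWarning on construction, and inherit everything else from the
# replacement. A generic, self-contained sketch of the same pattern (the two
# class names below are made up for illustration and are not transformers APIs):
import warnings


class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale


class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OldProcessor is deprecated. Please use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


OldProcessor()  # emits the FutureWarning, then behaves exactly like NewProcessor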
| 315 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size if size is not None else {"height": 18, "width": 20}
UpperCamelCase_ = do_thumbnail
UpperCamelCase_ = do_align_axis
UpperCamelCase_ = do_pad
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean
UpperCamelCase_ = image_std
def _lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowercase (a_ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_thumbnail" ) )
self.assertTrue(hasattr(snake_case__ , "do_align_long_axis" ) )
self.assertTrue(hasattr(snake_case__ , "do_pad" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 20} )
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"height": 84, "width": 42} )
def _lowerCamelCase ( self ):
'''simple docstring'''
pass
@is_flaky()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
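# A quick usage sketch of the processor exercised by the tests above, assuming
# transformers, Pillow, and torch are installed: a PIL image goes in, and a
# batched channels-first float tensor padded to `size` comes out.
import numpy as np
from PIL import Image
from transformers import DonutImageProcessor

toy_image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
toy_processor = DonutImageProcessor(size={"height": 18, "width": 20})
toy_pixel_values = toy_processor(toy_image, return_tensors="pt").pixel_values
print(toy_pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])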
| 128 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("""num_inference_steps""", 50),)
def _lowerCamelCase ( self , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = {"num_train_timesteps": 1000}
config.update(**snake_case__ )
return config
def _lowerCamelCase ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop("num_inference_steps" , snake_case__ )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config(**snake_case__ )
UpperCamelCase_ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
UpperCamelCase_ = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
UpperCamelCase_ = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
UpperCamelCase_ = dummy_past_residuals[:]
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self ):
'''simple docstring'''
pass
def _lowerCamelCase ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop("num_inference_steps" , snake_case__ )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase_ = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
UpperCamelCase_ = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase_ = dummy_past_residuals[:]
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(**snake_case__ )
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = 10
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = model(snake_case__ , snake_case__ )
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = model(snake_case__ , snake_case__ )
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop("num_inference_steps" , snake_case__ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
UpperCamelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase_ = dummy_past_residuals[:]
UpperCamelCase_ = scheduler.timesteps[5]
UpperCamelCase_ = scheduler.timesteps[6]
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ , time_step=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=snake_case__ , time_step=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.full_loop()
UpperCamelCase_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 254_0529 ) < 10
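# The tests above all exercise the same scheduler contract: configure with
# set_timesteps(), then repeatedly feed the model residual to step(). A minimal
# stand-alone sketch of that loop, with a trivial stand-in for the denoiser:
import torch
from diffusers import IPNDMScheduler

toy_scheduler = IPNDMScheduler(num_train_timesteps=1000)
toy_scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in toy_scheduler.timesteps:
    residual = 0.1 * sample  # a real pipeline would call the model here
    sample = toy_scheduler.step(residual, t, sample).prev_sample
print(sample.shape)  # the sample keeps its shape through every step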
| 128 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
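# The pattern above defers heavy imports: only `_import_structure` is built at
# import time, and `_LazyModule` resolves each submodule on first attribute
# access. A tiny importlib-based sketch of the same idea (simplified; this is
# not the actual transformers helper):
import importlib


class TinyLazyModule:
    def __init__(self, symbol_to_module):
        self._symbol_to_module = symbol_to_module

    def __getattr__(self, name):
        # Resolve the owning module only when the symbol is first requested.
        module = importlib.import_module(self._symbol_to_module[name])
        return getattr(module, name)


lazy = TinyLazyModule({"OrderedDict": "collections"})
print(lazy.OrderedDict)  # resolved on first access, not at construction time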
| 183 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Any:
"""simple docstring"""
UpperCamelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_pad
def A__ ( self ) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
"""simple docstring"""
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase ,UpperCamelCase = image.size
else:
UpperCamelCase ,UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = self.size["""shortest_edge"""]
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase ,UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( lowerCamelCase , unittest.TestCase ):
lowercase = ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = ConditionalDetrImageProcessingTester(self )
@property
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""image_id""": 39769, """annotations""": target}
# encode them
UpperCamelCase = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
@slow
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
UpperCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCamelCase = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
| 183 | 1 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray ( rgb ):
    """simple docstring"""
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary ( gray ):
    """simple docstring"""
    return (gray > 127) & (gray <= 255)


def dilation ( image , kernel ):
    """simple docstring"""
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
lowerCAmelCase__ = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
lowerCAmelCase__ = np.array(Image.open(lena_path))
# kernel to be applied
lowerCAmelCase__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
lowerCAmelCase__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
lowerCAmelCase__ = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
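# A tiny worked example of the dilation above: with a 3x3 cross-shaped
# structuring element, a single foreground pixel grows into a cross
# (toy values chosen by hand, purely for illustration):
toy = np.zeros((5, 5), dtype=int)
toy[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation(toy, cross))
# centre pixel plus its four direct neighbours are now 1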
| 130 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
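# The try/except blocks above implement a generic optional-dependency guard:
# probe for a backend at import time and only register its symbols when it is
# available. The same idea, stripped down to importlib (illustrative only; the
# real transformers availability checks are more involved):
import importlib.util

available_symbols = ["BioGptConfig", "BioGptTokenizer"]  # always importable
if importlib.util.find_spec("torch") is not None:
    available_symbols += ["BioGptModel", "BioGptForCausalLM"]  # torch-only symbols
print(available_symbols)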
| 130 | 1 |
"""simple docstring"""
def lowercase ( bin_string : str ):
    '''simple docstring'''
    if not all(char in '''01''' for char in bin_string ):
        raise ValueError('''Non-binary value was passed to the function''' )
    if not bin_string:
        raise ValueError('''Empty string was passed to the function''' )
    oct_string = ''''''
    while len(bin_string ) % 3 != 0:
        bin_string = '''0''' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
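# Worked examples for the converter above (`lowercase` is this file's name for
# the binary-to-octal function), checked against int()'s base-2 and base-8
# parsing: "1010" left-pads to "001 010", which reads as octal "12".
for bits in ("111", "1010", "11010010"):
    assert int(lowercase(bits), 8) == int(bits, 2)
    print(bits, "->", lowercase(bits))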
| 326 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
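# The preprocess() flow above is: resize (shortest edge) -> center crop ->
# rescale to [0, 1] -> normalize -> convert to channels-first. A numpy-only
# sketch of the rescale/normalize/transpose steps on a fake HWC image
# (IMAGENET_STANDARD_MEAN and IMAGENET_STANDARD_STD are both [0.5, 0.5, 0.5]):
fake_image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
rescaled = fake_image * (1 / 255)
normalized = (rescaled - np.array(IMAGENET_STANDARD_MEAN)) / np.array(IMAGENET_STANDARD_STD)
channels_first = np.transpose(normalized, (2, 0, 1))  # ChannelDimension.FIRST
print(channels_first.shape)  # (3, 224, 224)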
| 326 | 1 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Dict = '''T5Config'''
def __a ( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    """simple docstring"""
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = """mt5"""
__lowerCAmelCase = MTaConfig
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = """mt5"""
__lowerCAmelCase = MTaConfig
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = """mt5"""
__lowerCAmelCase = MTaConfig
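# A toy check of the shift helper above (`__a` is this file's name for
# shift_tokens_right): labels move one slot right, the decoder start token
# fills slot 0, and the -100 loss-masking value is replaced by the pad id.
toy_labels = jnp.array([[5, 6, -100]])
print(__a(toy_labels, 0, 1))  # pad_token_id=0, decoder_start_token_id=1 -> [[1 5 6]]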
| 258 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform ( number_of_qubits : int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits , str ):
        raise TypeError("""number of qubits must be an integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("""number of qubits must be an exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""" )
    qr = QuantumRegister(number_of_qubits , """qr""" )
    cr = ClassicalRegister(number_of_qubits , """cr""" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""" )
    job = execute(quantum_circuit , backend , shots=10000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
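# A simulator-free cross-check of the circuit above: the n-qubit QFT matrix is
# U[j, k] = exp(2*pi*i*j*k / 2**n) / sqrt(2**n), and applying it to |0...0>
# gives the uniform superposition -- which is why the 10000 shots spread
# roughly evenly over all 2**n bitstrings.
n = 3
dim = 2 ** n
rows, cols = np.meshgrid(np.arange(dim), np.arange(dim), indexing="ij")
qft_matrix = np.exp(2j * np.pi * rows * cols / dim) / np.sqrt(dim)
state = qft_matrix @ np.eye(dim)[0]  # QFT applied to |000>
print(np.round(np.abs(state) ** 2, 3))  # eight equal probabilities of 0.125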
| 38 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : List[Any] ,__lowercase : Optional[Any] ,__lowercase : Any=10_24 ):
'''simple docstring'''
A_ , A_ : Tuple = [], []
A_ : Dict = list(zip(__lowercase ,__lowercase ) )
A_ , A_ : List[Any] = sorted_examples[0]
def is_too_big(__lowercase : Optional[Any] ):
return tok(__lowercase ,return_tensors='pt' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
A_ : Optional[Any] = new_src + ' ' + src
A_ : Optional[Any] = new_tgt + ' ' + tgt
if is_too_big(__lowercase ) or is_too_big(__lowercase ): # cant fit, finalize example
finished_src.append(__lowercase )
finished_tgt.append(__lowercase )
A_ , A_ : List[str] = src, tgt
else: # can fit, keep adding
A_ , A_ : str = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__lowercase )
finished_tgt.append(__lowercase )
return finished_src, finished_tgt
def UpperCamelCase ( __lowercase : Any ,__lowercase : Path ,__lowercase : str ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = Path(__lowercase )
save_path.mkdir(exist_ok=__lowercase )
for split in ["train"]:
A_ , A_ : List[str] = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
A_ : Any = [x.rstrip() for x in Path(__lowercase ).open().readlines()]
A_ : List[Any] = [x.rstrip() for x in Path(__lowercase ).open().readlines()]
A_ , A_ : str = pack_examples(__lowercase ,__lowercase ,__lowercase ,__lowercase )
print(f'''packed {split} split from {len(__lowercase )} examples -> {len(__lowercase )}.''' )
Path(save_path / f'''{split}.source''' ).open('w' ).write('\n'.join(__lowercase ) )
Path(save_path / f'''{split}.target''' ).open('w' ).write('\n'.join(__lowercase ) )
for split in ["val", "test"]:
A_ , A_ : str = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
shutil.copyfile(__lowercase ,save_path / f'''{split}.source''' )
shutil.copyfile(__lowercase ,save_path / f'''{split}.target''' )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument('--tok_name' ,type=__lowercase ,help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' ,type=__lowercase ,default=1_28 )
parser.add_argument('--data_dir' ,type=__lowercase )
parser.add_argument('--save_path' ,type=__lowercase )
A_ : List[Any] = parser.parse_args()
A_ : Dict = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__lowercase ,Path(args.data_dir ) ,args.max_seq_len ,args.save_path )
if __name__ == "__main__":
packer_cli()
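# The packer above greedily concatenates adjacent (source, target) pairs until
# a token budget would be exceeded. A tokenizer-free sketch of the same greedy
# loop, using whitespace word counts as a stand-in for tokenizer lengths:
def greedy_pack(pairs, max_words=8):
    packed = []
    cur_src, cur_tgt = pairs[0]
    for src, tgt in pairs[1:]:
        cand_src, cand_tgt = cur_src + " " + src, cur_tgt + " " + tgt
        if max(len(cand_src.split()), len(cand_tgt.split())) > max_words:
            packed.append((cur_src, cur_tgt))  # budget hit: finalize the pair
            cur_src, cur_tgt = src, tgt
        else:
            cur_src, cur_tgt = cand_src, cand_tgt
    packed.append((cur_src, cur_tgt))
    return packed


print(greedy_pack([("a b", "x"), ("c d", "y"), ("e f g h i", "z")]))
# [('a b c d', 'x y'), ('e f g h i', 'z')]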
| 192 | from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 42
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
if latents is None:
A_ : Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
A_ : Optional[int] = latents.to(lowercase )
A_ : List[Any] = latents * scheduler.init_noise_sigma
return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'cuda:{gpu_id}')
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device('meta') or not hasattr(self.image_encoder, '_hf_hook'):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)  # EXAMPLE_DOC_STRING is assumed to be defined earlier in this module
    def __call__(
        self,
        image,
        num_images_per_prompt=1,
        num_inference_steps=25,
        generator=None,
        latents=None,
        guidance_scale=4.0,
        frame_size=64,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128)
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
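# Minimal usage sketch for the pipeline above. The pipeline class name
# (ShapEImg2ImgPipeline) and checkpoint id ("openai/shap-e-img2img") are
# assumptions based on the diffusers Shap-E image-to-image pipeline; adjust
# them to whatever this class is actually registered as.
#
# import torch
# from diffusers import ShapEImg2ImgPipeline
# from diffusers.utils import load_image, export_to_gif
#
# pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16)
# pipe = pipe.to("cuda")
# image = load_image("https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png")
# images = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=256).images
# export_to_gif(images[0], "corgi_3d.gif")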
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __magic_name__ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=_a , )
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_a )
class __magic_name__ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=_a , )
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_a )
def a__ ( ) -> Dict:
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def a__ ( ) -> Optional[int]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class __magic_name__ ( __lowercase ):
'''simple docstring'''
@require_beam
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase = DummyBeamDataset(cache_dir=_a , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_a , builder.name , """default""" , """0.0.0""" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
lowerCamelCase = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , _a )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , _a )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_a , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def _lowerCAmelCase ( self ):
"""simple docstring"""
import apache_beam as beam
lowerCamelCase = beam.io.parquetio.WriteToParquet
lowerCamelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase = DummyBeamDataset(cache_dir=_a , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
lowerCamelCase = partial(_a , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_a , builder.name , """default""" , """0.0.0""" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_a , builder.name , """default""" , """0.0.0""" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
lowerCamelCase = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , _a )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , _a )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(_a , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase = DummyBeamDataset(cache_dir=_a )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase = NestedBeamDataset(cache_dir=_a , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_a , builder.name , """default""" , """0.0.0""" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
lowerCamelCase = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , _a )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , _a )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_a , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
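# Hypothetical quick-start for running one of the dummy beam builders above
# outside the test suite. "DirectRunner" is the standard Apache Beam local
# runner name; everything else reuses names defined in this file.
if __name__ == "__main__":
    import tempfile as _tempfile

    with _tempfile.TemporaryDirectory() as _cache_dir:
        _builder = DummyBeamDataset(cache_dir=_cache_dir, beam_runner="DirectRunner")
        _builder.download_and_prepare()
        _dset = _builder.as_dataset()
        print(_dset["train"][0])  # {'content': 'foo'}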
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14)
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
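# Short usage sketch for the config classes above: instantiate a default
# LeViT configuration and inspect the ONNX export settings. Purely
# illustrative; no model weights are involved.
if __name__ == "__main__":
    config = LevitConfig()
    print(config.model_type, config.hidden_sizes)  # levit [128, 256, 384]
    onnx_config = LevitOnnxConfig(config)
    print(dict(onnx_config.inputs))
    print(onnx_config.atol_for_validation)  # 0.0001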
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
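# Minimal usage sketch: build a default ConvNeXT V2 config and pick backbone
# stages by name. The attributes follow the HF backbone API; everything here
# is illustrative only.
if __name__ == "__main__":
    config = ConvNextV2Config(out_features=["stage2", "stage4"])
    print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(config.out_features)  # ['stage2', 'stage4']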
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
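# Illustrative toy `__init__.py` that parse_init() above understands: the lazy
# `_import_structure` half and the TYPE_CHECKING half must list the same
# objects, otherwise analyze_results() reports a difference.
#
# _import_structure = {"configuration_foo": ["FooConfig"]}
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     pass
# else:
#     _import_structure["modeling_foo"] = ["FooModel"]
#
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         from .modeling_foo import FooModel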
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
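# Usage sketch: the attribute_map above lets the config answer generic HF
# attribute queries (hidden_size, num_hidden_layers, ...) while storing
# GPT-style names internally. Illustrative only.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_embd=1024, n_layer=20)
    print(config.hidden_size)        # 1024, resolved through attribute_map
    print(config.num_hidden_layers)  # 20
    print(config.multi_query)        # True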
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json'''}
_lowercase = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
_lowercase = {'''mgp-str''': 27}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = VOCAB_FILES_NAMES
_lowerCamelCase: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] ,A_ : str ,A_ : List[Any]="[GO]" ,A_ : Any="[GO]" ,A_ : Union[str, Any]="[s]" ,A_ : Dict="[GO]" ,**A_ : Any ) -> str:
super().__init__(
unk_token=A_ ,bos_token=A_ ,eos_token=A_ ,pad_token=A_ ,**A_ ,)
with open(A_ ,encoding='utf-8' ) as vocab_handle:
A = json.load(A_ )
A = {v: k for k, v in self.vocab.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return len(self.vocab )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return dict(self.vocab ,**self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[Any] ) -> Union[str, Any]:
A = []
for s in text:
char_tokens.extend(A_ )
return char_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> List[Any]:
return self.vocab.get(A_ ,self.vocab.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ) -> str:
return self.decoder.get(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(A_ ) )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(A_ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=A_ ,ensure_ascii=A_ ) + '\n' )
return (vocab_file,) | 74 |
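# Usage sketch for the character-level tokenizer above (checkpoint id assumed
# from the vocab map in this file; adjust as needed):
#
# tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
# tokens = tokenizer._tokenize("hello")  # ['h', 'e', 'l', 'l', 'o']
# ids = tokenizer(["hello"])             # per-character vocabulary ids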
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
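# Usage sketch: a config with linear RoPE scaling passes validation, while a
# factor <= 1.0 triggers the ValueError raised above. Illustrative only.
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}
    try:
        LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
    except ValueError as e:
        print(e)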
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the value d < `digit` whose unit fraction 1/d has the longest
    recurring cycle in its decimal part (Project Euler 26)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
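    # Illustrative check: among d < 10, 1/7 has the longest recurring cycle
    # (6 repeating digits), so solution(digit=10) should return 7.
    print(solution(digit=10))  # 7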
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128),)
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results,)
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0",)[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
_snake_case = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
_snake_case = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_snake_case = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
_snake_case = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
_snake_case = st.sidebar.checkbox("Demo options")
if demo_options:
_snake_case = st.sidebar.selectbox(
"",
action_list,
index=3,
)
_snake_case = action_list.index(action_st)
_snake_case = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
_snake_case = show_type == "Show full text of passages"
else:
_snake_case = 3
_snake_case = True
_snake_case = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
_snake_case = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
_snake_case = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
_snake_case = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
_snake_case = "wiki40b"
_snake_case = "dense"
_snake_case = "beam"
_snake_case = 2
_snake_case = 64
_snake_case = 256
_snake_case = None
_snake_case = None
_snake_case = st.sidebar.checkbox("Generation options")
if generate_options:
_snake_case = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
_snake_case = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
_snake_case = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_snake_case = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_snake_case = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_snake_case = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
_snake_case = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
_snake_case = None
# start main text
_snake_case = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
_snake_case = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_snake_case = st.text_input("Enter your question here:", "")
else:
_snake_case = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
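            # Query both indexers, then interleave their results, dropping duplicate passages.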
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
_snake_case = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
_snake_case = res[1].strip()
if sec_titles == "":
_snake_case = "[{}]({})".format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
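        # Keep the top-ranked answer, plus any further answers with a score above 2.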
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
_snake_case = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
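# A minimal standalone sketch (hypothetical values) of the merge step used in the
# "mixed" branch above: make_support is assumed to return (question_doc, support_list),
# each support entry being (article_title, section_titles, passage_text).
dense_hits = [("Chocolate", "Production", "Cacao beans are fermented, dried, and roasted...")]
sparse_hits = [("Chocolate", "Production", "Cacao beans are fermented, dried, and roasted...")]
merged = []
for res_d, res_s in zip(dense_hits, sparse_hits):
    if tuple(res_d) not in merged:
        merged.append(tuple(res_d))
    if tuple(res_s) not in merged:
        merged.append(tuple(res_s))
assert len(merged) == 1  # identical dense and sparse hits collapse to a single passage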
| 26 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
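# Usage sketch (not part of the package __init__ above; the checkpoint name is an
# assumption, substitute any UnCLIP-compatible checkpoint):
#
#     from diffusers import UnCLIPPipeline
#
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a photo of a red panda").images[0]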
| 30 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
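# Usage sketch: num_hidden_layers and num_blocks are derived from block_sizes,
# so they cannot be set directly (the setters above raise NotImplementedError).
if __name__ == "__main__":
    config = FunnelConfig(block_sizes=[4, 4, 4])
    assert config.num_hidden_layers == 12  # sum(block_sizes)
    assert config.num_blocks == 3  # len(block_sizes)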
| 353 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
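# Usage sketch (a minimal example; assumes the transformers ONNX utilities are
# available): the export config above only declares dynamic axes for pixel_values,
# so a default BeitConfig is enough to inspect them.
if __name__ == "__main__":
    config = BeitConfig()
    onnx_config = BeitOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])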
| 186 | 0 |