code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
def _A ( UpperCamelCase_ : list[int], UpperCamelCase_ : list[int]) -> None:
'''simple docstring'''
__lowercase = len(UpperCamelCase_)
print("The following activities are selected:")
# The first activity is always selected
__lowercase = 0
print(UpperCamelCase_, end=",")
# Consider rest of the activities
for j in range(UpperCamelCase_):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(UpperCamelCase_, end=",")
__lowercase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = [1, 3, 0, 5, 8, 5]
_a = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 17 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Union[str, Any] = len(UpperCAmelCase )
# We need to create solution object to save path.
UpperCAmelCase : int = [[0 for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )]
UpperCAmelCase : Union[str, Any] = run_maze(UpperCAmelCase , 0 , 0 , UpperCAmelCase )
if solved:
print('''\n'''.join(str(UpperCAmelCase ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Dict = len(UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
UpperCAmelCase : Dict = 1
return True
UpperCAmelCase : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
UpperCAmelCase : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
UpperCAmelCase : Any = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCAmelCase : str = 1
# check for directions
if (
run_maze(UpperCAmelCase , i + 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j + 1 , UpperCAmelCase )
or run_maze(UpperCAmelCase , i - 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j - 1 , UpperCAmelCase )
):
return True
UpperCAmelCase : Any = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Any = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
| 336 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase : Tuple = "3"
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 115 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
UpperCAmelCase : str = [randint(-1_000 , 1_000 ) for i in range(10 )]
UpperCAmelCase : Any = randint(-5_000 , 5_000 )
return (arr, r)
_lowerCamelCase : Any = make_dataset()
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, ...]:
for triplet in permutations(UpperCAmelCase , 3 ):
if sum(UpperCAmelCase ) == target:
return tuple(sorted(UpperCAmelCase ) )
return (0, 0, 0)
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, int, int]:
arr.sort()
UpperCAmelCase : Tuple = len(UpperCAmelCase )
for i in range(n - 1 ):
UpperCAmelCase , UpperCAmelCase : int = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
UpperCAmelCase : Tuple = '''
triplet_sum1(*dataset)
'''
UpperCAmelCase : List[str] = '''
triplet_sum2(*dataset)
'''
UpperCAmelCase : Tuple = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
UpperCAmelCase : str = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
return (min(UpperCAmelCase ), min(UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : int = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 100 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 336 | 0 |
from manim import *
class snake_case_ ( lowerCamelCase__ ):
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
lowercase__ : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowercase__ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowercase__ : Any = [mem.copy() for i in range(6 )]
lowercase__ : Any = VGroup(*__A ).arrange(__A , buff=0 )
lowercase__ : List[str] = VGroup(*__A ).arrange(__A , buff=0 )
lowercase__ : Optional[int] = VGroup(__A , __A ).arrange(__A , buff=0 )
lowercase__ : Union[str, Any] = Text("CPU" , font_size=24 )
lowercase__ : int = Group(__A , __A ).arrange(__A , buff=0.5 , aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
lowercase__ : Dict = [mem.copy() for i in range(1 )]
lowercase__ : Optional[int] = VGroup(*__A ).arrange(__A , buff=0 )
lowercase__ : Dict = Text("GPU" , font_size=24 )
lowercase__ : List[str] = Group(__A , __A ).arrange(__A , buff=0.5 , aligned_edge=__A )
gpu.align_to(__A , __A )
gpu.set_x(gpu.get_x() - 1 )
self.add(__A )
lowercase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowercase__ : List[Any] = VGroup(*__A ).arrange(__A , buff=0 )
lowercase__ : Any = Text("Model" , font_size=24 )
lowercase__ : Union[str, Any] = Group(__A , __A ).arrange(__A , buff=0.5 , aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.play(
Create(__A , run_time=1 ) , Create(__A , run_time=1 ) , Create(__A , run_time=1 ) , )
lowercase__ : Optional[int] = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
lowercase__ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ : Optional[int] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__A , run_time=2.5 ) , Write(__A ) , Write(__A ) )
self.add(__A )
lowercase__ : Tuple = []
lowercase__ : Optional[int] = []
lowercase__ : str = []
for i, rect in enumerate(__A ):
lowercase__ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__A , opacity=0.7 )
cpu_target.move_to(__A )
cpu_target.generate_target()
lowercase__ : Optional[Any] = 0.46 / 4
lowercase__ : Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__A )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__A , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__A , buff=0.0 )
cpu_targs.append(__A )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__A ) )
second_animations.append(MoveToTarget(__A , run_time=1.5 ) )
self.play(*__A )
self.play(*__A )
self.wait()
| 87 |
import numpy
# List of input, output pairs
_lowerCamelCase : Dict = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
_lowerCamelCase : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
_lowerCamelCase : Dict = [2, 4, 1, 5]
_lowerCamelCase : Dict = len(train_data)
_lowerCamelCase : int = 0.0_0_9
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]="train" ) -> Dict:
return calculate_hypothesis_value(UpperCAmelCase , UpperCAmelCase ) - output(
UpperCAmelCase , UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Any:
UpperCAmelCase : str = 0
for i in range(len(UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ) -> List[str]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : str=m ) -> Dict:
UpperCAmelCase : Optional[int] = 0
for i in range(UpperCAmelCase ):
if index == -1:
summation_value += _error(UpperCAmelCase )
else:
summation_value += _error(UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def a__ ( UpperCAmelCase : Dict ) -> Dict:
UpperCAmelCase : Dict = summation_of_cost_derivative(UpperCAmelCase , UpperCAmelCase ) / m
return cost_derivative_value
def a__ ( ) -> List[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase : List[str] = 0.000002
UpperCAmelCase : Any = 0
UpperCAmelCase : Dict = 0
while True:
j += 1
UpperCAmelCase : List[Any] = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase ) ):
UpperCAmelCase : List[str] = get_cost_derivative(i - 1 )
UpperCAmelCase : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase , rtol=UpperCAmelCase , ):
break
UpperCAmelCase : int = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ) -> List[Any]:
for i in range(len(UpperCAmelCase ) ):
print(('''Actual output value:''', output(UpperCAmelCase , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(UpperCAmelCase , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[10, 20, 30, 40] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , ):
_SCREAMING_SNAKE_CASE : List[str] = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : List[Any] = image_size
_SCREAMING_SNAKE_CASE : int = num_channels
_SCREAMING_SNAKE_CASE : Dict = embeddings_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_sizes
_SCREAMING_SNAKE_CASE : Union[str, Any] = depths
_SCREAMING_SNAKE_CASE : Optional[Any] = is_training
_SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
_SCREAMING_SNAKE_CASE : str = hidden_act
_SCREAMING_SNAKE_CASE : Dict = num_labels
_SCREAMING_SNAKE_CASE : List[str] = scope
_SCREAMING_SNAKE_CASE : Optional[int] = len(__A )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFRegNetModel(config=__A )
_SCREAMING_SNAKE_CASE : Optional[int] = model(__A , training=__A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.num_labels
_SCREAMING_SNAKE_CASE : List[str] = TFRegNetForImageClassification(__A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__A , labels=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
A_ : Tuple = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
A_ : List[str] = False
A_ : str = False
A_ : int = False
A_ : Any = False
A_ : Union[str, Any] = False
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = TFRegNetModelTester(self )
_SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=__A , has_text_modality=__A )
def UpperCAmelCase_ ( self ):
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ):
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = model_class(__A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCAmelCase_ ( self ):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__A )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(__A , __A ) , training=__A )
_SCREAMING_SNAKE_CASE : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Optional[int] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE : Tuple = layer_type
_SCREAMING_SNAKE_CASE : List[str] = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(__A , __A , __A )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__snake_case , __snake_case , __snake_case , __snake_case={} ):
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__A , return_dict=__A , **__A )
_SCREAMING_SNAKE_CASE : Any = model(__A , return_dict=__A , **__A ).to_tuple()
def recursive_check(__snake_case , __snake_case ):
if isinstance(__A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__A , __A ):
recursive_check(__A , __A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__A , __A ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__A , __A )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__A )
_SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(__A , __A )
_SCREAMING_SNAKE_CASE : int = self._prepare_for_class(__A , __A )
check_equivalence(__A , __A , __A )
_SCREAMING_SNAKE_CASE : str = self._prepare_for_class(__A , __A , return_labels=__A )
_SCREAMING_SNAKE_CASE : int = self._prepare_for_class(__A , __A , return_labels=__A )
check_equivalence(__A , __A , __A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__A , __A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__A , __A )
check_equivalence(__A , __A , __A , {"""output_hidden_states""": True} )
_SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(__A , __A , return_labels=__A )
_SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(__A , __A , return_labels=__A )
check_equivalence(__A , __A , __A , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def UpperCAmelCase_ ( self ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : int = TFRegNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor
_SCREAMING_SNAKE_CASE : List[str] = prepare_img()
_SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=__A , return_tensors="""tf""" )
# forward pass
_SCREAMING_SNAKE_CASE : str = model(**__A , training=__A )
# verify the logits
_SCREAMING_SNAKE_CASE : List[str] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
_SCREAMING_SNAKE_CASE : Any = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1e-4 )
| 200 |
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = len(UpperCAmelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
UpperCAmelCase : Optional[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase : Any = left
UpperCAmelCase : List[str] = point
elif point > right:
UpperCAmelCase : Any = right
UpperCAmelCase : List[str] = point
else:
if item < current_item:
UpperCAmelCase : Optional[int] = point - 1
else:
UpperCAmelCase : str = point + 1
return None
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif point > right:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , point + 1 , UpperCAmelCase )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> int:
if collection != sorted(UpperCAmelCase ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
_lowerCamelCase : Optional[int] = 0
if debug == 1:
_lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCamelCase : List[Any] = 6_7
_lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336 | 0 |
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 290 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else ''''''
UpperCAmelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any:
for i in range(config.num_hidden_layers ):
UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : str = q_bias
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
UpperCAmelCase : str = gamma_a
UpperCAmelCase : Dict = gamma_a
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : str = val
def a__ ( ) -> Optional[int]:
UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]:
UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase : List[Any] = 1_024
UpperCAmelCase : Optional[Any] = 4_096
UpperCAmelCase : Any = 24
UpperCAmelCase : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 16
UpperCAmelCase : List[Any] = '''huggingface/label-files'''
UpperCAmelCase : Any = '''rvlcdip-id2label.json'''
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = idalabel
UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
UpperCAmelCase : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase : str = encoding['''pixel_values''']
UpperCAmelCase : Any = model(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify logits
UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_A = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( lowerCAmelCase=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase__ : Tuple = subparsers.add_parser("""tpu-config""" , description=_description )
else:
UpperCAmelCase__ : Dict = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
UpperCAmelCase__ : Optional[int] = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=lowerCAmelCase , default=lowerCAmelCase , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=lowerCAmelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=lowerCAmelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
UpperCAmelCase__ : Union[str, Any] = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=lowerCAmelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase__ : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase__ : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase__ : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase__ : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase__ : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase__ : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
UpperCAmelCase__ : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , lowerCAmelCase ):
UpperCAmelCase__ : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase__ : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
UpperCAmelCase__ : int = '''; '''.join(lowerCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase__ : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {' '.join(lowerCAmelCase )}""" )
return
subprocess.run(lowerCAmelCase )
print("""Successfully setup pod.""" )
def a__ ( ) -> Any:
UpperCAmelCase__ : Any = tpu_command_parser()
UpperCAmelCase__ : Tuple = parser.parse_args()
tpu_command_launcher(lowerCAmelCase )
| 171 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 0 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = []
for line in lines:
lowerCAmelCase_ = re.sub(r'''#.*''' , '''''' , _A ) # remove comments
if line:
filtered_lines.append(_A )
lowerCAmelCase_ = '''\n'''.join(_A )
# Make a hash from all this code
lowerCAmelCase_ = full_str.encode('''utf-8''' )
return shaaaa(_A ).hexdigest()
# get importable module names and hash for caching
_A = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_A = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_A = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_A = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 278 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase : List[Any] = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def a__ ( UpperCAmelCase : List[str] ) -> Any:
with open(UpperCAmelCase , '''r''' ) as f:
UpperCAmelCase : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
super().__init__(**__A )
UpperCAmelCase : Tuple = load_vocab_file(__A )
UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Union[str, Any] = unk_token
UpperCAmelCase : Optional[Any] = cls_token
UpperCAmelCase : Optional[int] = pad_token
UpperCAmelCase : Optional[int] = mask_token
UpperCAmelCase : List[str] = eos_token
UpperCAmelCase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __magic_name__ ( self : Tuple, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : List[Any], __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
return text.split()
def __magic_name__ ( self : Optional[int], __A : Dict=False ):
return len(self._id_to_token )
def __magic_name__ ( self : int ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __magic_name__ ( self : Tuple, __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
mask += [0] * len(__A ) + [1]
return mask
def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__A, '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __magic_name__ ( self : Dict ):
return self.get_vocab_size(with_added_tokens=__A )
def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
return super()._add_tokens(__A, special_tokens=__A )
| 336 | 0 |
def _A ( ) -> int:
"""simple docstring"""
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
for b in range(_lowercase , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
| 336 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
_SCREAMING_SNAKE_CASE = 'LayoutLMv3ImageProcessor'
_SCREAMING_SNAKE_CASE = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Optional[Any] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
snake_case_ = kwargs.pop("feature_extractor" )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A , __A )
def __call__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Tuple , ) -> Dict:
"""simple docstring"""
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
snake_case_ = self.image_processor(images=__A , return_tensors=__A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__A , __A ):
snake_case_ = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case_ = features['''words''']
snake_case_ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel values
snake_case_ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
snake_case_ = self.get_overflowing_images(__A , encoded_inputs["overflow_to_sample_mapping"] )
snake_case_ = images
return encoded_inputs
def lowerCAmelCase__ ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
snake_case_ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F''' {len(__A )} and {len(__A )}''' )
return images_with_overflow
def lowerCAmelCase__ ( self : Any , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase__ ( self : Any , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def lowerCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def lowerCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
| 159 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
| 336 | 0 |
"""simple docstring"""
_a = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
_a = {value: key for key, value in encode_dict.items()}
def _A ( UpperCamelCase_ : str) -> str:
'''simple docstring'''
__lowercase = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces")
return encoded
def _A ( UpperCamelCase_ : str) -> str:
'''simple docstring'''
if set(UpperCamelCase_) - {"A", "B", " "} != set():
raise Exception("decode() accepts only \'A\', \'B\' and spaces")
__lowercase = ''''''
for word in coded.split():
while len(UpperCamelCase_) != 0:
decoded += decode_dict[word[:5]]
__lowercase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 17 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _lowercase ( ctypes.Structure):
"""simple docstring"""
A__ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def lowercase_ ( ):
"""simple docstring"""
if os.name == "nt":
lowerCamelCase__ : List[str] = CursorInfo()
lowerCamelCase__ : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
lowerCamelCase__ : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def lowercase_ ( ):
"""simple docstring"""
if os.name == "nt":
lowerCamelCase__ : int = CursorInfo()
lowerCamelCase__ : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
lowerCamelCase__ : Any = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def lowercase_ ( ):
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
| 184 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
| 336 | 0 |
"""simple docstring"""
UpperCAmelCase : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 115 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCAmelCase ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def a__ ( ) -> Dict:
if os.name == "nt":
UpperCAmelCase : List[str] = CursorInfo()
UpperCAmelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def a__ ( ) -> Optional[int]:
if os.name == "nt":
UpperCAmelCase : int = CursorInfo()
UpperCAmelCase : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Any = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def a__ ( ) -> Optional[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
| 336 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__magic_name__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
__magic_name__ = F"""https://www.google.com/search?q={query}&num=100"""
__magic_name__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
__magic_name__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
__magic_name__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 100 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCamelCase = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCamelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase_ ( _lowerCamelCase : str):
if "://" in dataset_path:
lowercase__ : Optional[Any] = dataset_path.split("://")[1]
return dataset_path
def lowercase_ ( _lowerCamelCase : fsspec.AbstractFileSystem):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase_ ( _lowerCamelCase : fsspec.AbstractFileSystem , _lowerCamelCase : str , _lowerCamelCase : str):
lowercase__ : Any = not is_remote_filesystem(_lowerCamelCase)
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_lowerCamelCase) , fs._strip_protocol(_lowerCamelCase))
else:
fs.mv(_lowerCamelCase , _lowerCamelCase , recursive=_lowerCamelCase)
def lowercase_ ( ):
if hasattr(fsspec.asyn , "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
lowercase__ : Tuple = None
lowercase__ : str = None
lowercase__ : Tuple = threading.Lock()
| 87 |
from __future__ import annotations
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
UpperCAmelCase : str = number_of_bytes // partitions
UpperCAmelCase : Dict = []
for i in range(UpperCAmelCase ):
UpperCAmelCase : int = i * bytes_per_partition + 1
UpperCAmelCase : Optional[int] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1_6000 ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = int(round(sample_rate * max_length ) )
if len(SCREAMING_SNAKE_CASE__ ) <= sample_length:
return wav
_SCREAMING_SNAKE_CASE : str = randint(0 , len(SCREAMING_SNAKE_CASE__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class lowercase__ :
'''simple docstring'''
A_ : Tuple = field(default=lowerCamelCase__ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
A_ : Any = field(
default=lowerCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
A_ : List[str] = field(
default=lowerCamelCase__ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
A_ : Any = field(
default=lowerCamelCase__ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
A_ : Tuple = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
A_ : Dict = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
A_ : Any = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
A_ : Optional[Any] = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
A_ : Any = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
A_ : Optional[Any] = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
A_ : Tuple = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class lowercase__ :
'''simple docstring'''
A_ : List[Any] = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
A_ : Optional[int] = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
A_ : List[Any] = field(
default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
A_ : Optional[Any] = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
A_ : Optional[int] = field(
default=lowerCamelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
A_ : int = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
A_ : Optional[int] = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
A_ : List[Any] = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
A_ : Dict = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
A_ : List[Any] = field(
default=lowerCamelCase__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def UpperCAmelCase_ ( self ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , __A , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : Optional[int] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
_SCREAMING_SNAKE_CASE : List[Any] = DatasetDict()
_SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_SCREAMING_SNAKE_CASE : Dict = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_SCREAMING_SNAKE_CASE : str = feature_extractor.model_input_names[0]
def train_transforms(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : int = []
for audio in batch[data_args.audio_column_name]:
_SCREAMING_SNAKE_CASE : List[Any] = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
_SCREAMING_SNAKE_CASE : Dict = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
_SCREAMING_SNAKE_CASE : Union[str, Any] = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
_SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
_SCREAMING_SNAKE_CASE : str = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
_SCREAMING_SNAKE_CASE : str = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_SCREAMING_SNAKE_CASE : int = raw_datasets['''train'''].features[data_args.label_column_name].names
_SCREAMING_SNAKE_CASE : Any = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : str = label
# Load the accuracy metric from the datasets package
_SCREAMING_SNAKE_CASE : Dict = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : List[Any] = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=eval_pred.label_ids )
_SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE__ ) , labelaid=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_SCREAMING_SNAKE_CASE : Any = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE : int = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE : Dict = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
# Initialize our trainer
_SCREAMING_SNAKE_CASE : List[str] = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = last_checkpoint
_SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE : str = trainer.evaluate()
trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 200 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __snake_case ( lowerCamelCase__ ):
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: str = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__A , 'tf_padding'))
self.parent.assertTrue(hasattr(__A , 'depth_multiplier'))
class __snake_case :
def __init__( self , lowercase , lowercase=13 , lowercase=3 , lowercase=32 , lowercase=0.25 , lowercase=8 , lowercase=8 , lowercase=6 , lowercase=32 , lowercase=True , lowercase=True , lowercase=True , lowercase="relu6" , lowercase=12_80 , lowercase=0.1 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=10 , lowercase=None , ) -> Any:
'''simple docstring'''
a__: Optional[int] = parent
a__: List[str] = batch_size
a__: List[str] = num_channels
a__: str = image_size
a__: Optional[int] = depth_multiplier
a__: Union[str, Any] = depth_divisible_by
a__: Optional[Any] = min_depth
a__: List[str] = expand_ratio
a__: Dict = tf_padding
a__: str = output_stride
a__: Union[str, Any] = first_layer_is_expansion
a__: List[Any] = finegrained_output
a__: Optional[Any] = hidden_act
a__: str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
a__: Optional[Any] = classifier_dropout_prob
a__: Dict = use_labels
a__: List[str] = is_training
a__: Tuple = num_labels
a__: Union[str, Any] = initializer_range
a__: Any = scope
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__: Dict = None
a__: Any = None
if self.use_labels:
a__: Dict = ids_tensor([self.batch_size] , self.num_labels)
a__: Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a__: Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Any = MobileNetVaModel(config=__A)
model.to(__A)
model.eval()
a__: Optional[Any] = model(__A)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = self.num_labels
a__: Any = MobileNetVaForImageClassification(__A)
model.to(__A)
model.eval()
a__: Optional[int] = model(__A , labels=__A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: Tuple = self.num_labels
a__: Dict = MobileNetVaForSemanticSegmentation(__A)
model.to(__A)
model.eval()
a__: Dict = model(__A)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
a__: Optional[Any] = model(__A , labels=__A)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: List[str] = self.prepare_config_and_inputs()
a__: int = config_and_inputs
a__: Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
a__ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a__ = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = MobileNetVaModelTester(self)
a__: List[Any] = MobileNetVaConfigTester(self , config_class=__A , has_text_modality=__A)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions')
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: Optional[Any] = model_class(__A)
a__: Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: Union[str, Any] = [*signature.parameters.keys()]
a__: Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase):
a__: Union[str, Any] = model_class(__A)
model.to(__A)
model.eval()
with torch.no_grad():
a__: Dict = model(**self._prepare_for_class(__A , __A))
a__: Optional[Any] = outputs.hidden_states
a__: List[Any] = 16
self.assertEqual(len(__A) , __A)
a__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: Tuple = True
check_hidden_states_output(__A , __A , __A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__: Tuple = True
check_hidden_states_output(__A , __A , __A)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A)
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Optional[Any] = MobileNetVaModel.from_pretrained(__A)
self.assertIsNotNone(__A)
def __a ( ) ->int:
a__: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[Any] = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(__A)
a__: Optional[int] = self.default_image_processor
a__: Optional[Any] = prepare_img()
a__: Dict = image_processor(images=__A , return_tensors='pt').to(__A)
# forward pass
with torch.no_grad():
a__: str = model(**__A)
# verify the logits
a__: int = torch.Size((1, 10_01))
self.assertEqual(outputs.logits.shape , __A)
a__: Tuple = torch.tensor([0.2445, -1.1993, 0.1905]).to(__A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4))
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
a__: List[Any] = model.to(__A)
a__: Tuple = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
a__: List[Any] = prepare_img()
a__: int = image_processor(images=__A , return_tensors='pt').to(__A)
# forward pass
with torch.no_grad():
a__: Union[str, Any] = model(**__A)
a__: Optional[Any] = outputs.logits
# verify the logits
a__: Tuple = torch.Size((1, 21, 65, 65))
self.assertEqual(logits.shape , __A)
a__: Tuple = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4))
| 290 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
# remove and rename some keys of load the original model
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 336 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase = 4_00_00_00 ) -> int:
UpperCAmelCase__ : int = [0, 1]
UpperCAmelCase__ : int = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
UpperCAmelCase__ : str = 0
for j in range(len(lowerCAmelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 171 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ):
for k, v in d.items():
UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k
if v and isinstance(UpperCAmelCase , UpperCAmelCase ):
yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
__snake_case = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = pipeline('''visual-question-answering''', model='''hf-internal-testing/tiny-vilt-random-vqa''' )
lowerCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = vqa_pipeline(__A, top_k=1 )
self.assertEqual(
__A, [
[{'''score''': ANY(__A ), '''answer''': ANY(__A )}],
[{'''score''': ANY(__A ), '''answer''': ANY(__A )}],
], )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = pipeline('''visual-question-answering''', model='''hf-internal-testing/tiny-vilt-random-vqa''' )
lowerCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowerCAmelCase_ = '''How many cats are there?'''
lowerCAmelCase_ = vqa_pipeline(image=__A, question='''How many cats are there?''', top_k=2 )
self.assertEqual(
__A, [{'''score''': ANY(__A ), '''answer''': ANY(__A )}, {'''score''': ANY(__A ), '''answer''': ANY(__A )}] )
lowerCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(
__A, [{'''score''': ANY(__A ), '''answer''': ANY(__A )}, {'''score''': ANY(__A ), '''answer''': ANY(__A )}] )
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = pipeline('''visual-question-answering''', model='''dandelin/vilt-b32-finetuned-vqa''' )
lowerCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowerCAmelCase_ = '''How many cats are there?'''
lowerCAmelCase_ = vqa_pipeline(image=__A, question=__A, top_k=2 )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
lowerCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
lowerCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2 )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2, )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
| 278 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
| 336 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase (lowerCamelCase__ , unittest.TestCase ):
_lowercase = DiTPipeline
_lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_lowercase = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = TransformeraDModel(
sample_size=16,num_layers=2,patch_size=4,attention_head_dim=8,num_attention_heads=2,in_channels=4,out_channels=8,attention_bias=__A,activation_fn='gelu-approximate',num_embeds_ada_norm=1000,norm_type='ada_norm_zero',norm_elementwise_affine=__A,)
__UpperCamelCase = AutoencoderKL()
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def snake_case_ ( self: Any,A_: Union[str, Any],A_: Any=0 ):
'''simple docstring'''
if str(__A ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__A )
else:
__UpperCamelCase = torch.Generator(device=__A ).manual_seed(__A )
__UpperCamelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = '''cpu'''
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = self.get_dummy_inputs(__A )
__UpperCamelCase = pipe(**__A ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 16, 16, 3) )
__UpperCamelCase = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
__UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__A,1E-3 )
def snake_case_ ( self: Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=__A,expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Dict ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__UpperCamelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
__UpperCamelCase = pipe.get_label_ids(__A )
__UpperCamelCase = pipe(__A,generator=__A,num_inference_steps=40,output_type='np' ).images
for word, image in zip(__A,__A ):
__UpperCamelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__UpperCamelCase = ['''vase''', '''umbrella''']
__UpperCamelCase = pipe.get_label_ids(__A )
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = pipe(__A,generator=__A,num_inference_steps=25,output_type='np' ).images
for word, image in zip(__A,__A ):
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 310 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 | 0 |
def _lowerCAmelCase ( lowerCAmelCase_ :int , lowerCAmelCase_ :Any )->Optional[Any]:
'''simple docstring'''
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def _lowerCAmelCase ( lowerCAmelCase_ :List[Any] , lowerCAmelCase_ :int )->Optional[int]:
'''simple docstring'''
snake_case_ = [[float("inf" ) for _ in range(lowerCAmelCase_ )] for _ in range(lowerCAmelCase_ )]
for i in range(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ ):
snake_case_ = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(lowerCAmelCase_ ):
# looping through rows of graph array
for i in range(lowerCAmelCase_ ):
# looping through columns of graph array
for j in range(lowerCAmelCase_ ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
snake_case_ = dist[i][k] + dist[k][j]
_print_dist(lowerCAmelCase_ , lowerCAmelCase_ )
return dist, v
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Optional[Any] = int(input('''Enter number of vertices: '''))
SCREAMING_SNAKE_CASE :List[Any] = int(input('''Enter number of edges: '''))
SCREAMING_SNAKE_CASE :Optional[int] = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
SCREAMING_SNAKE_CASE :Optional[int] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
SCREAMING_SNAKE_CASE :Dict = int(input('''Enter source:'''))
SCREAMING_SNAKE_CASE :Tuple = int(input('''Enter destination:'''))
SCREAMING_SNAKE_CASE :Union[str, Any] = float(input('''Enter weight:'''))
SCREAMING_SNAKE_CASE :str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 159 |
def a__ ( UpperCAmelCase : int ) -> int:
UpperCAmelCase : list[list[int]] = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , UpperCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = LEDTokenizer
__UpperCAmelCase : Tuple = LEDTokenizerFast
__UpperCAmelCase : Any = True
def _lowercase ( self : int ):
super().setUp()
__lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowercase = dict(zip(__A, range(len(__A ) ) ) )
__lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase = {'''unk_token''': '''<unk>'''}
__lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def _lowercase ( self : Any, **UpperCAmelCase__ : List[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **__A )
def _lowercase ( self : Tuple, **UpperCAmelCase__ : Dict ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **__A )
def _lowercase ( self : int, UpperCAmelCase__ : Tuple ):
return "lower newer", "lower newer"
@cached_property
def _lowercase ( self : List[Any] ):
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _lowercase ( self : Any ):
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _lowercase ( self : Union[str, Any] ):
__lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__lowercase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowercase = tokenizer(__A, max_length=len(__A ), padding=__A, return_tensors="pt" )
self.assertIsInstance(__A, __A )
self.assertEqual((2, 9), batch.input_ids.shape )
self.assertEqual((2, 9), batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(__A, __A )
@require_torch
def _lowercase ( self : Optional[int] ):
__lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowercase = tokenizer(__A, padding=__A, return_tensors="pt" )
self.assertIn("input_ids", __A )
self.assertIn("attention_mask", __A )
self.assertNotIn("labels", __A )
self.assertNotIn("decoder_attention_mask", __A )
@require_torch
def _lowercase ( self : int ):
__lowercase = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowercase = tokenizer(text_target=__A, max_length=3_2, padding="max_length", return_tensors="pt" )
self.assertEqual(3_2, targets["input_ids"].shape[1] )
@require_torch
def _lowercase ( self : Optional[int] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowercase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"], padding=__A, truncation=__A, return_tensors="pt" )
self.assertIsInstance(__A, __A )
self.assertEqual(batch.input_ids.shape, (2, 5_1_2_2) )
@require_torch
def _lowercase ( self : int ):
__lowercase = ['''A long paragraph for summarization.''']
__lowercase = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowercase = tokenizer(__A, return_tensors="pt" )
__lowercase = tokenizer(text_target=__A, return_tensors="pt" )
__lowercase = inputs['''input_ids''']
__lowercase = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _lowercase ( self : int ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowercase = ['''Summary of the text.''', '''Another summary.''']
__lowercase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__lowercase = tokenizer(__A, padding=__A )
__lowercase = [[0] * len(__A ) for x in encoded_output['''input_ids''']]
__lowercase = tokenizer.pad(__A )
self.assertSequenceEqual(outputs["global_attention_mask"], __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(__A, **__A )
__lowercase = self.tokenizer_class.from_pretrained(__A, **__A )
__lowercase = '''A, <mask> AllenNLP sentence.'''
__lowercase = tokenizer_r.encode_plus(__A, add_special_tokens=__A, return_token_type_ids=__A )
__lowercase = tokenizer_p.encode_plus(__A, add_special_tokens=__A, return_token_type_ids=__A )
self.assertEqual(sum(tokens_r["token_type_ids"] ), sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ), sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ), )
__lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__A, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__A, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 17 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Union[str, Any] = len(UpperCAmelCase )
# We need to create solution object to save path.
UpperCAmelCase : int = [[0 for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )]
UpperCAmelCase : Union[str, Any] = run_maze(UpperCAmelCase , 0 , 0 , UpperCAmelCase )
if solved:
print('''\n'''.join(str(UpperCAmelCase ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Dict = len(UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
UpperCAmelCase : Dict = 1
return True
UpperCAmelCase : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
UpperCAmelCase : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
UpperCAmelCase : Any = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCAmelCase : str = 1
# check for directions
if (
run_maze(UpperCAmelCase , i + 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j + 1 , UpperCAmelCase )
or run_maze(UpperCAmelCase , i - 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j - 1 , UpperCAmelCase )
):
return True
UpperCAmelCase : Any = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
def lowercase_ ( _A : str ):
"""simple docstring"""
lowerCamelCase__ : Tuple = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
lowerCamelCase__ : Dict = MaskFormerConfig(backbone_config=_A )
lowerCamelCase__ : int = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
lowerCamelCase__ : Any = 847
lowerCamelCase__ : Optional[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
lowerCamelCase__ : List[str] = 150
lowerCamelCase__ : int = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
lowerCamelCase__ : str = 171
lowerCamelCase__ : Optional[Any] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
lowerCamelCase__ : str = 133
lowerCamelCase__ : str = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
lowerCamelCase__ : Tuple = 19
lowerCamelCase__ : Any = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
lowerCamelCase__ : Tuple = 65
lowerCamelCase__ : Optional[Any] = '''mapillary-vistas-id2label.json'''
lowerCamelCase__ : List[Any] = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
lowerCamelCase__ : str = {int(_A ): v for k, v in idalabel.items()}
return config
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def lowercase_ ( _A : Any , _A : Any , _A : List[str] ):
"""simple docstring"""
lowerCamelCase__ : Any = dct.pop(_A )
lowerCamelCase__ : List[Any] = val
def lowercase_ ( _A : Optional[Any] , _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCamelCase__ : List[str] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase__ : List[Any] = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
lowerCamelCase__ : Any = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : str = in_proj_weight[:dim, :]
lowerCamelCase__ : int = in_proj_bias[: dim]
lowerCamelCase__ : Dict = in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase__ : Optional[Any] = in_proj_bias[
dim : dim * 2
]
lowerCamelCase__ : Dict = in_proj_weight[
-dim :, :
]
lowerCamelCase__ : List[str] = in_proj_bias[-dim :]
# fmt: on
def lowercase_ ( _A : str , _A : int ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase__ : Any = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
lowerCamelCase__ : Optional[int] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Tuple = in_proj_weight[: hidden_size, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[:config.hidden_size]
lowerCamelCase__ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ : Optional[Any] = in_proj_weight[-hidden_size :, :]
lowerCamelCase__ : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase__ : Optional[Any] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
lowerCamelCase__ : Dict = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Dict = in_proj_weight[: hidden_size, :]
lowerCamelCase__ : Dict = in_proj_bias[:config.hidden_size]
lowerCamelCase__ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase__ : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ : Union[str, Any] = in_proj_weight[-hidden_size :, :]
lowerCamelCase__ : int = in_proj_bias[-hidden_size :]
# fmt: on
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase__ : Dict = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def lowercase_ ( _A : str , _A : str , _A : str , _A : bool = False ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = get_maskformer_config(_A )
# load original state_dict
with open(_A , "rb" ) as f:
lowerCamelCase__ : Optional[Any] = pickle.load(_A )
lowerCamelCase__ : Any = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowerCamelCase__ : List[str] = create_rename_keys(_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_swin_q_k_v(_A , config.backbone_config )
read_in_decoder_q_k_v(_A , _A )
# update to torch tensors
for key, value in state_dict.items():
lowerCamelCase__ : List[str] = torch.from_numpy(_A )
# load 🤗 model
lowerCamelCase__ : Tuple = MaskFormerForInstanceSegmentation(_A )
model.eval()
for name, param in model.named_parameters():
print(_A , param.shape )
lowerCamelCase__ : int = model.load_state_dict(_A , strict=_A )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_A ) == 0, F"Unexpected keys: {unexpected_keys}"
# verify results
lowerCamelCase__ : Optional[Any] = prepare_img()
if "vistas" in model_name:
lowerCamelCase__ : List[str] = 65
elif "cityscapes" in model_name:
lowerCamelCase__ : List[str] = 65535
else:
lowerCamelCase__ : str = 255
lowerCamelCase__ : Union[str, Any] = True if '''ade''' in model_name else False
lowerCamelCase__ : Any = MaskFormerImageProcessor(ignore_index=_A , reduce_labels=_A )
lowerCamelCase__ : int = image_processor(_A , return_tensors="pt" )
lowerCamelCase__ : Any = model(**_A )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
lowerCamelCase__ : int = torch.tensor(
[[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"nielsr/{model_name}" )
image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A : Optional[int] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 184 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
| 336 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
UpperCAmelCase : str = (720, 1280) # Height, Width
UpperCAmelCase : Dict = (0.4, 0.6) # if height or width lower than this scale, drop it.
UpperCAmelCase : List[str] = 1 / 100
UpperCAmelCase : List[str] = ""
UpperCAmelCase : Tuple = ""
UpperCAmelCase : Optional[Any] = ""
UpperCAmelCase : Tuple = 250
def lowerCamelCase ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : int = get_dataset(_UpperCamelCase , _UpperCamelCase )
for index in range(_UpperCamelCase ):
__UpperCAmelCase : str = random.sample(range(len(_UpperCamelCase ) ) , 4 )
__UpperCAmelCase : Union[str, Any] = update_image_and_anno(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , filter_scale=_UpperCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase : Dict = random_chars(3_2 )
__UpperCAmelCase : Union[str, Any] = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
__UpperCAmelCase : Optional[int] = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , _UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
__UpperCAmelCase : Tuple = []
for anno in new_annos:
__UpperCAmelCase : str = anno[3] - anno[1]
__UpperCAmelCase : Tuple = anno[4] - anno[2]
__UpperCAmelCase : Dict = anno[1] + width / 2
__UpperCAmelCase : Any = anno[2] + height / 2
__UpperCAmelCase : List[str] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(_UpperCamelCase )
with open(f'''{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> tuple[list, list]:
'''simple docstring'''
__UpperCAmelCase : str = []
__UpperCAmelCase : Any = []
for label_file in glob.glob(os.path.join(_UpperCamelCase , """*.txt""" ) ):
__UpperCAmelCase : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(_UpperCamelCase ) as in_file:
__UpperCAmelCase : Tuple = in_file.readlines()
__UpperCAmelCase : int = os.path.join(_UpperCamelCase , f'''{label_name}.jpg''' )
__UpperCAmelCase : Any = []
for obj_list in obj_lists:
__UpperCAmelCase : int = obj_list.rstrip("""\n""" ).split(""" """ )
__UpperCAmelCase : Optional[Any] = float(obj[1] ) - float(obj[3] ) / 2
__UpperCAmelCase : Optional[int] = float(obj[2] ) - float(obj[4] ) / 2
__UpperCAmelCase : Any = float(obj[1] ) + float(obj[3] ) / 2
__UpperCAmelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(_UpperCamelCase )
labels.append(_UpperCamelCase )
return img_paths, labels
def lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list[int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[float, float] , _UpperCamelCase : float = 0.0 , ) -> tuple[list, list, str]:
'''simple docstring'''
__UpperCAmelCase : List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCAmelCase : int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCAmelCase : Union[str, Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCAmelCase : Optional[int] = int(scale_x * output_size[1] )
__UpperCAmelCase : Any = int(scale_y * output_size[0] )
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Any = []
for i, index in enumerate(_UpperCamelCase ):
__UpperCAmelCase : List[str] = all_img_list[index]
path_list.append(_UpperCamelCase )
__UpperCAmelCase : Dict = all_annos[index]
__UpperCAmelCase : Tuple = cva.imread(_UpperCamelCase )
if i == 0: # top-left
__UpperCAmelCase : Dict = cva.resize(_UpperCamelCase , (divid_point_x, divid_point_y) )
__UpperCAmelCase : Dict = img
for bbox in img_annos:
__UpperCAmelCase : List[Any] = bbox[1] * scale_x
__UpperCAmelCase : Tuple = bbox[2] * scale_y
__UpperCAmelCase : Union[str, Any] = bbox[3] * scale_x
__UpperCAmelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCAmelCase : List[Any] = cva.resize(_UpperCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCAmelCase : Tuple = img
for bbox in img_annos:
__UpperCAmelCase : List[Any] = scale_x + bbox[1] * (1 - scale_x)
__UpperCAmelCase : int = bbox[2] * scale_y
__UpperCAmelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCAmelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCAmelCase : Dict = cva.resize(_UpperCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCAmelCase : Optional[Any] = img
for bbox in img_annos:
__UpperCAmelCase : Union[str, Any] = bbox[1] * scale_x
__UpperCAmelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__UpperCAmelCase : Any = bbox[3] * scale_x
__UpperCAmelCase : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCAmelCase : List[str] = cva.resize(
_UpperCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCAmelCase : int = img
for bbox in img_annos:
__UpperCAmelCase : Union[str, Any] = scale_x + bbox[1] * (1 - scale_x)
__UpperCAmelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__UpperCAmelCase : int = scale_x + bbox[3] * (1 - scale_x)
__UpperCAmelCase : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
__UpperCAmelCase : Union[str, Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCamelCase ( _UpperCamelCase : int ) -> str:
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
__UpperCAmelCase : List[str] = ascii_lowercase + digits
return "".join(random.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 115 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
UpperCAmelCase : str = [randint(-1_000 , 1_000 ) for i in range(10 )]
UpperCAmelCase : Any = randint(-5_000 , 5_000 )
return (arr, r)
_lowerCamelCase : Any = make_dataset()
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, ...]:
for triplet in permutations(UpperCAmelCase , 3 ):
if sum(UpperCAmelCase ) == target:
return tuple(sorted(UpperCAmelCase ) )
return (0, 0, 0)
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, int, int]:
arr.sort()
UpperCAmelCase : Tuple = len(UpperCAmelCase )
for i in range(n - 1 ):
UpperCAmelCase , UpperCAmelCase : int = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
UpperCAmelCase : Tuple = '''
triplet_sum1(*dataset)
'''
UpperCAmelCase : List[str] = '''
triplet_sum2(*dataset)
'''
UpperCAmelCase : Tuple = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
UpperCAmelCase : str = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
return (min(UpperCAmelCase ), min(UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : int = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
__magic_name__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__magic_name__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
__magic_name__ = [2, 4, 1, 5]
__magic_name__ = len(train_data)
__magic_name__ = 0.009
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_="train" ):
return calculate_hypothesis_value(UpperCamelCase_ , UpperCamelCase_ ) - output(
UpperCamelCase_ , UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = 0
for i in range(len(UpperCamelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=m ):
__SCREAMING_SNAKE_CASE = 0
for i in range(UpperCamelCase_ ):
if index == -1:
summation_value += _error(UpperCamelCase_ )
else:
summation_value += _error(UpperCamelCase_ ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = summation_of_cost_derivative(UpperCamelCase_ , UpperCamelCase_ ) / m
return cost_derivative_value
def _lowerCAmelCase ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__SCREAMING_SNAKE_CASE = 0.000_002
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while True:
j += 1
__SCREAMING_SNAKE_CASE = [0, 0, 0, 0]
for i in range(0 , len(UpperCamelCase_ ) ):
__SCREAMING_SNAKE_CASE = get_cost_derivative(i - 1 )
__SCREAMING_SNAKE_CASE = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCamelCase_ , UpperCamelCase_ , atol=UpperCamelCase_ , rtol=UpperCamelCase_ , ):
break
__SCREAMING_SNAKE_CASE = temp_parameter_vector
print(("""Number of iterations:""", j) )
def _lowerCAmelCase ( ):
for i in range(len(UpperCamelCase_ ) ):
print(("""Actual output value:""", output(UpperCamelCase_ , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(UpperCamelCase_ , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 100 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 336 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
print("Loading config file...")
def flatten_yaml_as_dict(_lowerCamelCase : Tuple , _lowerCamelCase : Any="" , _lowerCamelCase : Dict="."):
lowercase__ : List[str] = []
for k, v in d.items():
lowercase__ : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(_lowerCamelCase , collections.abc.MutableMapping):
items.extend(flatten_yaml_as_dict(_lowerCamelCase , _lowerCamelCase , sep=_lowerCamelCase).items())
else:
items.append((new_key, v))
return dict(_lowerCamelCase)
lowercase__ : List[str] = argparse.Namespace()
with open(_lowerCamelCase , "r") as yaml_file:
try:
lowercase__ : List[str] = yaml.load(_lowerCamelCase , Loader=yaml.FullLoader)
lowercase__ : Optional[int] = flatten_yaml_as_dict(_lowerCamelCase)
for k, v in flat_cfg.items():
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(_lowerCamelCase , str(_lowerCamelCase)))
return config
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : int):
lowercase__ : int = MobileViTVaConfig()
lowercase__ : str = False
# dataset
if task_name.startswith("imagenet1k_"):
lowercase__ : Any = 1000
if int(task_name.strip().split("_")[-1]) == 384:
lowercase__ : Any = 384
else:
lowercase__ : Tuple = 256
lowercase__ : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith("imagenet21k_to_1k_"):
lowercase__ : Optional[Any] = 2_1000
if int(task_name.strip().split("_")[-1]) == 384:
lowercase__ : str = 384
else:
lowercase__ : Dict = 256
lowercase__ : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith("ade20k_"):
lowercase__ : Optional[Any] = 151
lowercase__ : Tuple = 512
lowercase__ : Tuple = '''ade20k-id2label.json'''
lowercase__ : Tuple = True
elif task_name.startswith("voc_"):
lowercase__ : Dict = 21
lowercase__ : str = 512
lowercase__ : Union[str, Any] = '''pascal-voc-id2label.json'''
lowercase__ : Dict = True
# orig_config
lowercase__ : List[Any] = load_orig_config_file(_lowerCamelCase)
assert getattr(_lowerCamelCase , "model.classification.name" , -1) == "mobilevit_v2", "Invalid model"
lowercase__ : Tuple = getattr(_lowerCamelCase , "model.classification.mitv2.width_multiplier" , 1.0)
assert (
getattr(_lowerCamelCase , "model.classification.mitv2.attn_norm_layer" , -1) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowercase__ : int = getattr(_lowerCamelCase , "model.classification.activation.name" , "swish")
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowercase__ : str = getattr(_lowerCamelCase , "model.segmentation.output_stride" , 16)
if "_deeplabv3" in task_name:
lowercase__ : int = getattr(_lowerCamelCase , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36])
lowercase__ : Any = getattr(_lowerCamelCase , "model.segmentation.deeplabv3.aspp_out_channels" , 512)
lowercase__ : Optional[Any] = getattr(_lowerCamelCase , "model.segmentation.deeplabv3.aspp_dropout" , 0.1)
# id2label
lowercase__ : Union[str, Any] = '''huggingface/label-files'''
lowercase__ : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset") , "r"))
lowercase__ : Any = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : int = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]):
lowercase__ : Union[str, Any] = dct.pop(_lowerCamelCase)
lowercase__ : List[str] = val
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False):
if base_model:
lowercase__ : Dict = ''''''
else:
lowercase__ : Dict = '''mobilevitv2.'''
lowercase__ : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowercase__ : List[str] = k[8:]
else:
lowercase__ : Dict = k
if ".block." in k:
lowercase__ : List[Any] = k_new.replace(".block." , ".")
if ".conv." in k:
lowercase__ : Optional[int] = k_new.replace(".conv." , ".convolution.")
if ".norm." in k:
lowercase__ : List[str] = k_new.replace(".norm." , ".normalization.")
if "conv_1." in k:
lowercase__ : Union[str, Any] = k_new.replace("conv_1." , f'''{model_prefix}conv_stem.''')
for i in [1, 2]:
if f'''layer_{i}.''' in k:
lowercase__ : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''')
if ".exp_1x1." in k:
lowercase__ : Optional[Any] = k_new.replace(".exp_1x1." , ".expand_1x1.")
if ".red_1x1." in k:
lowercase__ : int = k_new.replace(".red_1x1." , ".reduce_1x1.")
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
lowercase__ : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
if f'''layer_{i}.1.local_rep.0.''' in k:
lowercase__ : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
if f'''layer_{i}.1.local_rep.1.''' in k:
lowercase__ : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')
for i in [3, 4, 5]:
if i == 3:
lowercase__ : Dict = [0, 1]
elif i == 4:
lowercase__ : Dict = [0, 1, 2, 3]
elif i == 5:
lowercase__ : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
lowercase__ : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowercase__ : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''')
if f'''layer_{i}.1.conv_proj.''' in k:
lowercase__ : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')
if "pre_norm_attn.0." in k:
lowercase__ : Optional[int] = k_new.replace("pre_norm_attn.0." , "layernorm_before.")
if "pre_norm_attn.1." in k:
lowercase__ : Optional[Any] = k_new.replace("pre_norm_attn.1." , "attention.")
if "pre_norm_ffn.0." in k:
lowercase__ : List[Any] = k_new.replace("pre_norm_ffn.0." , "layernorm_after.")
if "pre_norm_ffn.1." in k:
lowercase__ : List[Any] = k_new.replace("pre_norm_ffn.1." , "ffn.conv1.")
if "pre_norm_ffn.3." in k:
lowercase__ : Any = k_new.replace("pre_norm_ffn.3." , "ffn.conv2.")
if "classifier.1." in k:
lowercase__ : Optional[int] = k_new.replace("classifier.1." , "classifier.")
if "seg_head." in k:
lowercase__ : Union[str, Any] = k_new.replace("seg_head." , "segmentation_head.")
if ".aspp_layer." in k:
lowercase__ : Tuple = k_new.replace(".aspp_layer." , ".")
if ".aspp_pool." in k:
lowercase__ : Optional[int] = k_new.replace(".aspp_pool." , ".")
rename_keys.append((k, k_new))
return rename_keys
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
lowercase__ : str = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head."):
keys_to_ignore.append(_lowerCamelCase)
for k in keys_to_ignore:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( ):
lowercase__ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowercase__ : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any]):
lowercase__ : Union[str, Any] = get_mobilevitva_config(_lowerCamelCase , _lowerCamelCase)
# load original state_dict
lowercase__ : List[str] = torch.load(_lowerCamelCase , map_location="cpu")
# load huggingface model
if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
lowercase__ : str = MobileViTVaForSemanticSegmentation(_lowerCamelCase).eval()
lowercase__ : str = False
else:
lowercase__ : Union[str, Any] = MobileViTVaForImageClassification(_lowerCamelCase).eval()
lowercase__ : Any = False
# remove and rename some keys of load the original model
lowercase__ : Optional[Any] = checkpoint
remove_unused_keys(_lowerCamelCase)
lowercase__ : Optional[Any] = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase)
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# load modified state_dict
model.load_state_dict(_lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase__ : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
lowercase__ : Any = image_processor(images=prepare_img() , return_tensors="pt")
lowercase__ : Union[str, Any] = model(**_lowerCamelCase)
# verify classification model
if task_name.startswith("imagenet"):
lowercase__ : Optional[Any] = outputs.logits
lowercase__ : int = logits.argmax(-1).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx])
if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
# expected_logits for base variant
lowercase__ : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01])
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 87 |
import numpy
# List of input, output pairs
_lowerCamelCase : Dict = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
_lowerCamelCase : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
_lowerCamelCase : Dict = [2, 4, 1, 5]
_lowerCamelCase : Dict = len(train_data)
_lowerCamelCase : int = 0.0_0_9
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]="train" ) -> Dict:
return calculate_hypothesis_value(UpperCAmelCase , UpperCAmelCase ) - output(
UpperCAmelCase , UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Any:
UpperCAmelCase : str = 0
for i in range(len(UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ) -> List[str]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : str=m ) -> Dict:
UpperCAmelCase : Optional[int] = 0
for i in range(UpperCAmelCase ):
if index == -1:
summation_value += _error(UpperCAmelCase )
else:
summation_value += _error(UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def a__ ( UpperCAmelCase : Dict ) -> Dict:
UpperCAmelCase : Dict = summation_of_cost_derivative(UpperCAmelCase , UpperCAmelCase ) / m
return cost_derivative_value
def a__ ( ) -> List[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase : List[str] = 0.000002
UpperCAmelCase : Any = 0
UpperCAmelCase : Dict = 0
while True:
j += 1
UpperCAmelCase : List[Any] = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase ) ):
UpperCAmelCase : List[str] = get_cost_derivative(i - 1 )
UpperCAmelCase : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase , rtol=UpperCAmelCase , ):
break
UpperCAmelCase : int = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ) -> List[Any]:
for i in range(len(UpperCAmelCase ) ):
print(('''Actual output value:''', output(UpperCAmelCase , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(UpperCAmelCase , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowercase__ ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = """donut-swin"""
A_ : Optional[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __snake_case=224 , __snake_case=4 , __snake_case=3 , __snake_case=96 , __snake_case=[2, 2, 6, 2] , __snake_case=[3, 6, 12, 24] , __snake_case=7 , __snake_case=4.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , **__snake_case , ):
super().__init__(**__A )
_SCREAMING_SNAKE_CASE : str = image_size
_SCREAMING_SNAKE_CASE : Dict = patch_size
_SCREAMING_SNAKE_CASE : Any = num_channels
_SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
_SCREAMING_SNAKE_CASE : List[Any] = depths
_SCREAMING_SNAKE_CASE : List[Any] = len(__A )
_SCREAMING_SNAKE_CASE : Dict = num_heads
_SCREAMING_SNAKE_CASE : List[str] = window_size
_SCREAMING_SNAKE_CASE : List[Any] = mlp_ratio
_SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
_SCREAMING_SNAKE_CASE : Any = hidden_act
_SCREAMING_SNAKE_CASE : List[Any] = use_absolute_embeddings
_SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
_SCREAMING_SNAKE_CASE : Tuple = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(embed_dim * 2 ** (len(__A ) - 1) )
| 200 |
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = len(UpperCAmelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
UpperCAmelCase : Optional[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase : Any = left
UpperCAmelCase : List[str] = point
elif point > right:
UpperCAmelCase : Any = right
UpperCAmelCase : List[str] = point
else:
if item < current_item:
UpperCAmelCase : Optional[int] = point - 1
else:
UpperCAmelCase : str = point + 1
return None
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif point > right:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , point + 1 , UpperCAmelCase )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> int:
if collection != sorted(UpperCAmelCase ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
_lowerCamelCase : Optional[int] = 0
if debug == 1:
_lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCamelCase : List[Any] = 6_7
_lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336 | 0 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
# Initialise PyTorch model
a__: Union[str, Any] = BigBirdConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
a__: str = BigBirdForQuestionAnswering(_SCREAMING_SNAKE_CASE )
else:
a__: str = BigBirdForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , is_trivia_qa=_SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowercase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 290 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else ''''''
UpperCAmelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any:
for i in range(config.num_hidden_layers ):
UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : str = q_bias
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
UpperCAmelCase : str = gamma_a
UpperCAmelCase : Dict = gamma_a
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : str = val
def a__ ( ) -> Optional[int]:
UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]:
UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase : List[Any] = 1_024
UpperCAmelCase : Optional[Any] = 4_096
UpperCAmelCase : Any = 24
UpperCAmelCase : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 16
UpperCAmelCase : List[Any] = '''huggingface/label-files'''
UpperCAmelCase : Any = '''rvlcdip-id2label.json'''
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = idalabel
UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
UpperCAmelCase : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase : str = encoding['''pixel_values''']
UpperCAmelCase : Any = model(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify logits
UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ) -> Dict:
model.train()
UpperCAmelCase__ : Optional[int] = model(lowerCAmelCase )
UpperCAmelCase__ : Dict = F.mse_loss(lowerCAmelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase=False ) -> Optional[int]:
set_seed(42 )
UpperCAmelCase__ : Optional[Any] = RegressionModel()
UpperCAmelCase__ : Any = deepcopy(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = RegressionDataset(length=80 )
UpperCAmelCase__ : Union[str, Any] = DataLoader(lowerCAmelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase__ : Optional[int] = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase__ : str = LambdaLR(lowerCAmelCase , lr_lambda=lambda lowerCAmelCase : epoch**0.65 )
UpperCAmelCase__ : Any = LambdaLR(lowerCAmelCase , lr_lambda=lambda lowerCAmelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase__ : List[str] = accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
UpperCAmelCase__ : List[str] = accelerator.prepare(lowerCAmelCase , lowerCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def a__ ( lowerCAmelCase ) -> Any:
# Test when on a single CPU or GPU that the context manager does nothing
UpperCAmelCase__ : Tuple = get_training_setup(lowerCAmelCase )
# Use a single batch
UpperCAmelCase__ : Union[str, Any] = next(iter(lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ : Any = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
# Sync grads
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase__ : List[Any] = ddp_input[torch.randperm(len(lowerCAmelCase ) )]
def a__ ( lowerCAmelCase ) -> str:
# Test on distributed setup that context manager behaves properly
UpperCAmelCase__ : Any = get_training_setup(lowerCAmelCase )
# Use a single batch
UpperCAmelCase__ : Optional[int] = next(iter(lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
# Sync grads
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase__ : int = ddp_input[torch.randperm(len(lowerCAmelCase ) )]
def a__ ( lowerCAmelCase=False , lowerCAmelCase=False ) -> Optional[Any]:
UpperCAmelCase__ : Tuple = Accelerator(
split_batches=lowerCAmelCase , dispatch_batches=lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase__ : Optional[int] = get_training_setup(lowerCAmelCase )
for iteration, batch in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ : Tuple = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase__ : Any = ddp_input[torch.randperm(len(lowerCAmelCase ) )]
GradientState._reset_state()
def a__ ( lowerCAmelCase=False , lowerCAmelCase=False ) -> str:
UpperCAmelCase__ : List[str] = Accelerator(
split_batches=lowerCAmelCase , dispatch_batches=lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase__ : int = get_training_setup(lowerCAmelCase , lowerCAmelCase )
for iteration, batch in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ : List[str] = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
UpperCAmelCase__ : Any = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def a__ ( ) -> Dict:
UpperCAmelCase__ : Union[str, Any] = Accelerator()
UpperCAmelCase__ : Optional[Any] = RegressionDataset(length=80 )
UpperCAmelCase__ : Any = DataLoader(lowerCAmelCase , batch_size=16 )
UpperCAmelCase__ : List[str] = RegressionDataset(length=96 )
UpperCAmelCase__ : List[Any] = DataLoader(lowerCAmelCase , batch_size=16 )
UpperCAmelCase__ : str = accelerator.prepare(lowerCAmelCase , lowerCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase )
if iteration < len(lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase )
if batch_num < len(lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Optional[Any] = Accelerator()
UpperCAmelCase__ : Optional[Any] = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(lowerCAmelCase , lowerCAmelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase , lowerCAmelCase )
def a__ ( lowerCAmelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 171 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 0 |
def __UpperCamelCase ( _A ):
if length <= 0 or not isinstance(_A , _A ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(_A )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 278 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase : List[Any] = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def a__ ( UpperCAmelCase : List[str] ) -> Any:
with open(UpperCAmelCase , '''r''' ) as f:
UpperCAmelCase : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
super().__init__(**__A )
UpperCAmelCase : Tuple = load_vocab_file(__A )
UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Union[str, Any] = unk_token
UpperCAmelCase : Optional[Any] = cls_token
UpperCAmelCase : Optional[int] = pad_token
UpperCAmelCase : Optional[int] = mask_token
UpperCAmelCase : List[str] = eos_token
UpperCAmelCase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __magic_name__ ( self : Tuple, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : List[Any], __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
return text.split()
def __magic_name__ ( self : Optional[int], __A : Dict=False ):
return len(self._id_to_token )
def __magic_name__ ( self : int ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __magic_name__ ( self : Tuple, __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
mask += [0] * len(__A ) + [1]
return mask
def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__A, '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __magic_name__ ( self : Dict ):
return self.get_vocab_size(with_added_tokens=__A )
def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
return super()._add_tokens(__A, special_tokens=__A )
| 336 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 310 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
| 336 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE :Optional[int] = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Optional[Any] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :int = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :int = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Tuple = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 159 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
| 336 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_a = ["small", "medium", "large"]
_a = "lm_head.decoder.weight"
_a = "lm_head.weight"
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> List[Any]:
'''simple docstring'''
__lowercase = torch.load(UpperCamelCase_)
__lowercase = d.pop(UpperCamelCase_)
os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_)
torch.save(UpperCamelCase_, os.path.join(UpperCamelCase_, UpperCamelCase_))
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
_a = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_a = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl")
_a = F"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 17 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 0 |
from __future__ import annotations
def lowercase_ ( _A : list[list[int]] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = len(_A )
# We need to create solution object to save path.
lowerCamelCase__ : int = [[0 for _ in range(_A )] for _ in range(_A )]
lowerCamelCase__ : Union[str, Any] = run_maze(_A , 0 , 0 , _A )
if solved:
print("\n".join(str(_A ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def lowercase_ ( _A : list[list[int]] , _A : int , _A : int , _A : list[list[int]] ):
"""simple docstring"""
lowerCamelCase__ : Dict = len(_A )
# Final check point.
if i == j == (size - 1):
lowerCamelCase__ : Dict = 1
return True
lowerCamelCase__ : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
lowerCamelCase__ : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowerCamelCase__ : Any = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowerCamelCase__ : str = 1
# check for directions
if (
run_maze(_A , i + 1 , _A , _A )
or run_maze(_A , _A , j + 1 , _A )
or run_maze(_A , i - 1 , _A , _A )
or run_maze(_A , _A , j - 1 , _A )
):
return True
lowerCamelCase__ : Any = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
| 336 | 0 |
"""simple docstring"""
import cva
import numpy as np
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : int , UpperCamelCase : float , UpperCamelCase : int ):
'''simple docstring'''
if k in (0.04, 0.06):
__UpperCAmelCase : Tuple = k
__UpperCAmelCase : str = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Any ):
'''simple docstring'''
return str(self.k )
def lowerCamelCase__ ( self : str , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Tuple = cva.imread(__A , 0 )
__UpperCAmelCase : int = img.shape
__UpperCAmelCase : list[list[int]] = []
__UpperCAmelCase : int = img.copy()
__UpperCAmelCase : Union[str, Any] = cva.cvtColor(__A , cva.COLOR_GRAY2RGB )
__UpperCAmelCase : Union[str, Any] = np.gradient(__A )
__UpperCAmelCase : int = dx**2
__UpperCAmelCase : Dict = dy**2
__UpperCAmelCase : str = dx * dy
__UpperCAmelCase : Optional[int] = 0.04
__UpperCAmelCase : str = self.window_size // 2
for y in range(__A , h - offset ):
for x in range(__A , w - offset ):
__UpperCAmelCase : Any = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__UpperCAmelCase : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__UpperCAmelCase : Optional[int] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__UpperCAmelCase : str = (wxx * wyy) - (wxy**2)
__UpperCAmelCase : Any = wxx + wyy
__UpperCAmelCase : Optional[Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = HarrisCorner(0.04, 3)
UpperCAmelCase : Any = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 115 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCAmelCase ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def a__ ( ) -> Dict:
if os.name == "nt":
UpperCAmelCase : List[str] = CursorInfo()
UpperCAmelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def a__ ( ) -> Optional[int]:
if os.name == "nt":
UpperCAmelCase : int = CursorInfo()
UpperCAmelCase : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Any = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def a__ ( ) -> Optional[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__SCREAMING_SNAKE_CASE = i + 1
else:
__SCREAMING_SNAKE_CASE = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 100 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case_ ( lowerCamelCase__ ):
__A : str = "Speech2TextFeatureExtractor"
__A : int = "Speech2TextTokenizer"
def __init__( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str ) -> Tuple:
super().__init__(__A , __A )
lowercase__ : str = self.feature_extractor
lowercase__ : Any = False
def __call__( self : str , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowercase__ : Optional[Any] = kwargs.pop("raw_speech" )
else:
lowercase__ : List[Any] = kwargs.pop("audio" , __A )
lowercase__ : Optional[int] = kwargs.pop("sampling_rate" , __A )
lowercase__ : Tuple = kwargs.pop("text" , __A )
if len(__A ) > 0:
lowercase__ : List[str] = args[0]
lowercase__ : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowercase__ : int = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
lowercase__ : Union[str, Any] = self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__ : Optional[Any] = encodings['''input_ids''']
return inputs
def __UpperCamelCase ( self : str , *lowercase_ : Union[str, Any] , **lowercase_ : Union[str, Any] ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def __UpperCamelCase ( self : Optional[int] , *lowercase_ : Any , **lowercase_ : Dict ) -> Dict:
return self.tokenizer.decode(*__A , **__A )
@contextmanager
def __UpperCamelCase ( self : Any ) -> Optional[int]:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowercase__ : List[str] = True
lowercase__ : List[Any] = self.tokenizer
yield
lowercase__ : Tuple = self.feature_extractor
lowercase__ : Union[str, Any] = False
| 87 |
from __future__ import annotations
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
UpperCAmelCase : str = number_of_bytes // partitions
UpperCAmelCase : Dict = []
for i in range(UpperCAmelCase ):
UpperCAmelCase : int = i * bytes_per_partition + 1
UpperCAmelCase : Optional[int] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase_ : int = logging.get_logger(__name__)
@dataclass
class lowercase__ ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **__snake_case ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_SCREAMING_SNAKE_CASE : Any = deprecated_arg[3:]
_SCREAMING_SNAKE_CASE : List[str] = not kwargs.pop(__A )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
_SCREAMING_SNAKE_CASE : Any = kwargs.pop("""tpu_name""" , self.tpu_name )
_SCREAMING_SNAKE_CASE : str = kwargs.pop("""device_idx""" , self.device_idx )
_SCREAMING_SNAKE_CASE : Any = kwargs.pop("""eager_mode""" , self.eager_mode )
_SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**__A )
A_ : List[str] = field(
default=lowerCamelCase__ , metadata={"""help""": """Name of TPU"""} , )
A_ : Dict = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
A_ : List[str] = field(default=lowerCamelCase__ , metadata={"""help""": """Benchmark models in eager model."""} )
A_ : List[Any] = field(
default=lowerCamelCase__ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
_SCREAMING_SNAKE_CASE : Any = None
if self.tpu:
try:
if self.tpu_name:
_SCREAMING_SNAKE_CASE : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return tpu
@cached_property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_SCREAMING_SNAKE_CASE : List[str] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def UpperCAmelCase_ ( self ):
return self.n_gpu > 0
| 200 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 | 0 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
if not sentence:
return ""
a__: Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
# remove and rename some keys of load the original model
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 336 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
_A = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def a__ ( lowerCAmelCase = "dhaka" , lowerCAmelCase = 5 ) -> int:
UpperCAmelCase__ : List[Any] = min(lowerCAmelCase , 50 ) # Prevent abuse!
UpperCAmelCase__ : Tuple = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
UpperCAmelCase__ : Optional[Any] = requests.get("""https://www.google.com/search""" , params=lowerCAmelCase , headers=lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = BeautifulSoup(html.text , """html.parser""" )
UpperCAmelCase__ : Optional[Any] = ''''''.join(
re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
UpperCAmelCase__ : Tuple = json.dumps(lowerCAmelCase )
UpperCAmelCase__ : Dict = json.loads(lowerCAmelCase )
UpperCAmelCase__ : Any = re.findall(
R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , lowerCAmelCase , )
if not matched_google_image_data:
return 0
UpperCAmelCase__ : Optional[Any] = re.sub(
R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(lowerCAmelCase ) , )
UpperCAmelCase__ : List[Any] = re.findall(
R"""(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , lowerCAmelCase , )
for index, fixed_full_res_image in enumerate(lowerCAmelCase ):
if index >= max_images:
return index
UpperCAmelCase__ : Tuple = bytes(lowerCAmelCase , """ascii""" ).decode(
"""unicode-escape""" )
UpperCAmelCase__ : int = bytes(lowerCAmelCase , """ascii""" ).decode(
"""unicode-escape""" )
UpperCAmelCase__ : str = urllib.request.build_opener()
UpperCAmelCase__ : Union[str, Any] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(lowerCAmelCase )
UpperCAmelCase__ : int = F"""query_{query.replace(' ' , '_' )}"""
if not os.path.exists(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
urllib.request.urlretrieve( # noqa: S310
lowerCAmelCase , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
_A = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("""Please provide a search term.""")
raise
| 171 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ):
for k, v in d.items():
UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k
if v and isinstance(UpperCAmelCase , UpperCAmelCase ):
yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class A ( lowerCamelCase__ ):
def __init__( self ):
"""simple docstring"""
self.test()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
while not completed:
if counter == 1:
self.reset()
lowerCAmelCase_ = self.advance()
if not self.does_advance(__A ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowerCAmelCase_ = self.update(__A )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=False ):
"""simple docstring"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class A ( lowerCamelCase__ ):
def __init__( self, UpperCamelCase__ ):
"""simple docstring"""
super(__A, self ).__init__()
if not isinstance(__A, __A ) or len(__A ) == 0:
raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(__A, __A ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
lowerCAmelCase_ = token_ids
lowerCAmelCase_ = len(self.token_ids )
lowerCAmelCase_ = -1 # the index of the currently fulfilled step
lowerCAmelCase_ = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(__A, __A ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(__A )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(__A, __A ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(__A )}" )
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
if self.does_advance(__A ):
self.fulfilled_idx += 1
lowerCAmelCase_ = True
if self.fulfilled_idx == (self.seqlen - 1):
lowerCAmelCase_ = True
lowerCAmelCase_ = completed
else:
# failed to make progress.
lowerCAmelCase_ = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = False
lowerCAmelCase_ = 0
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = PhrasalConstraint(self.token_ids )
if stateful:
lowerCAmelCase_ = self.seqlen
lowerCAmelCase_ = self.fulfilled_idx
lowerCAmelCase_ = self.completed
return new_constraint
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=True ):
"""simple docstring"""
lowerCAmelCase_ = max([len(__A ) for one in nested_token_ids] )
lowerCAmelCase_ = {}
for token_ids in nested_token_ids:
lowerCAmelCase_ = root
for tidx, token_id in enumerate(__A ):
if token_id not in level:
lowerCAmelCase_ = {}
lowerCAmelCase_ = level[token_id]
if no_subsets and self.has_subsets(__A, __A ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f" {nested_token_ids}." )
lowerCAmelCase_ = root
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.trie
for current_token in current_seq:
lowerCAmelCase_ = start[current_token]
lowerCAmelCase_ = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.next_tokens(__A )
return len(__A ) == 0
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = list(root.values() )
if len(__A ) == 0:
return 1
else:
return sum([self.count_leaves(__A ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.count_leaves(__A )
return len(__A ) != leaf_count
class A ( lowerCamelCase__ ):
def __init__( self, UpperCamelCase__ ):
"""simple docstring"""
super(__A, self ).__init__()
if not isinstance(__A, __A ) or len(__A ) == 0:
raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(__A, __A ) for token_ids in nested_token_ids ):
raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(__A, __A ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
lowerCAmelCase_ = DisjunctiveTrie(__A )
lowerCAmelCase_ = nested_token_ids
lowerCAmelCase_ = self.trie.max_height
lowerCAmelCase_ = []
lowerCAmelCase_ = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.trie.next_tokens(self.current_seq )
if len(__A ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(__A, __A ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__A )}" )
lowerCAmelCase_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(__A, __A ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__A )}" )
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
if self.does_advance(__A ):
self.current_seq.append(__A )
lowerCAmelCase_ = True
else:
lowerCAmelCase_ = True
self.reset()
lowerCAmelCase_ = self.trie.reached_leaf(self.current_seq )
lowerCAmelCase_ = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = False
lowerCAmelCase_ = []
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = DisjunctiveConstraint(self.token_ids )
if stateful:
lowerCAmelCase_ = self.seqlen
lowerCAmelCase_ = self.current_seq
lowerCAmelCase_ = self.completed
return new_constraint
class A :
def __init__( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = constraints
# max # of steps required to fulfill a given constraint
lowerCAmelCase_ = max([c.seqlen for c in constraints] )
lowerCAmelCase_ = len(__A )
lowerCAmelCase_ = False
self.init_state()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = []
lowerCAmelCase_ = None
lowerCAmelCase_ = [constraint.copy(stateful=__A ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCAmelCase_ = constraint.advance()
if isinstance(__A, __A ):
token_list.append(__A )
elif isinstance(__A, __A ):
token_list.extend(__A )
else:
lowerCAmelCase_ = self.inprogress_constraint.advance()
if isinstance(__A, __A ):
token_list.append(__A )
elif isinstance(__A, __A ):
token_list.extend(__A )
if len(__A ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCAmelCase_ = self.add(__A )
# the entire list of constraints are fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(__A, __A ):
raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`." )
lowerCAmelCase_ = False, False
if self.completed:
lowerCAmelCase_ = True
lowerCAmelCase_ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowerCAmelCase_ = self.inprogress_constraint.update(__A )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__A ) )
lowerCAmelCase_ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCAmelCase_ = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCAmelCase_ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__A ):
lowerCAmelCase_ = pending_constraint.update(__A )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(__A )
lowerCAmelCase_ = None
if not complete and stepped:
lowerCAmelCase_ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCAmelCase_ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowerCAmelCase_ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=True ):
"""simple docstring"""
lowerCAmelCase_ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
lowerCAmelCase_ = [
constraint.copy(stateful=__A ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowerCAmelCase_ = self.inprogress_constraint.copy(stateful=__A )
lowerCAmelCase_ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 278 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
| 336 | 0 |
import argparse
from collections import defaultdict
import yaml
__snake_case = "docs/source/en/_toctree.yml"
def _A ( _lowercase ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = defaultdict(_lowercase )
for doc in model_doc:
counts[doc["local"]] += 1
__UpperCamelCase = [key for key, value in counts.items() if value > 1]
__UpperCamelCase = []
for duplicate_key in duplicates:
__UpperCamelCase = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(_lowercase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(_lowercase , key=lambda _lowercase : s["title"].lower() )
def _A ( _lowercase=False ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , encoding='utf-8' ) as f:
__UpperCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCamelCase = content[api_idx]['''sections''']
# Then to the model doc
__UpperCamelCase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__UpperCamelCase = api_doc[model_idx]['''sections''']
__UpperCamelCase = [(idx, section) for idx, section in enumerate(_lowercase ) if '''sections''' in section]
__UpperCamelCase = False
for idx, modality_doc in modalities_docs:
__UpperCamelCase = modality_doc['''sections''']
__UpperCamelCase = clean_model_doc_toc(_lowercase )
if old_modality_doc != new_modality_doc:
__UpperCamelCase = True
if overwrite:
__UpperCamelCase = new_modality_doc
if diff:
if overwrite:
__UpperCamelCase = model_doc
__UpperCamelCase = api_doc
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_lowercase , allow_unicode=_lowercase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 310 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 | 0 |
def _lowerCAmelCase ( lowerCAmelCase_ :str )->str:
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 159 |
def a__ ( UpperCAmelCase : int ) -> int:
UpperCAmelCase : list[list[int]] = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , UpperCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 0 |
"""simple docstring"""
def _A ( UpperCamelCase_ : int) -> bool:
'''simple docstring'''
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Union[str, Any] = len(UpperCAmelCase )
# We need to create solution object to save path.
UpperCAmelCase : int = [[0 for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )]
UpperCAmelCase : Union[str, Any] = run_maze(UpperCAmelCase , 0 , 0 , UpperCAmelCase )
if solved:
print('''\n'''.join(str(UpperCAmelCase ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Dict = len(UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
UpperCAmelCase : Dict = 1
return True
UpperCAmelCase : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
UpperCAmelCase : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
UpperCAmelCase : Any = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCAmelCase : str = 1
# check for directions
if (
run_maze(UpperCAmelCase , i + 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j + 1 , UpperCAmelCase )
or run_maze(UpperCAmelCase , i - 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j - 1 , UpperCAmelCase )
):
return True
UpperCAmelCase : Any = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
from __future__ import annotations
def lowercase_ ( _A : float , _A : float , _A : float , ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
| 336 | 0 |
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> bool:
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_UpperCamelCase ) )
def lowerCamelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : int , _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> bool:
'''simple docstring'''
if index == len(_UpperCamelCase ):
return True
# Recursive Step
for i in range(_UpperCamelCase ):
if valid_coloring(graph[index] , _UpperCamelCase , _UpperCamelCase ):
# Color current vertex
__UpperCAmelCase : List[str] = i
# Validate coloring
if util_color(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , index + 1 ):
return True
# Backtrack
__UpperCAmelCase : Any = -1
return False
def lowerCamelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [-1] * len(_UpperCamelCase )
if util_color(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , 0 ):
return colored_vertices
return []
| 115 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
UpperCAmelCase : str = [randint(-1_000 , 1_000 ) for i in range(10 )]
UpperCAmelCase : Any = randint(-5_000 , 5_000 )
return (arr, r)
_lowerCamelCase : Any = make_dataset()
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, ...]:
for triplet in permutations(UpperCAmelCase , 3 ):
if sum(UpperCAmelCase ) == target:
return tuple(sorted(UpperCAmelCase ) )
return (0, 0, 0)
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, int, int]:
arr.sort()
UpperCAmelCase : Tuple = len(UpperCAmelCase )
for i in range(n - 1 ):
UpperCAmelCase , UpperCAmelCase : int = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
UpperCAmelCase : Tuple = '''
triplet_sum1(*dataset)
'''
UpperCAmelCase : List[str] = '''
triplet_sum2(*dataset)
'''
UpperCAmelCase : Tuple = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
UpperCAmelCase : str = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
return (min(UpperCAmelCase ), min(UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : int = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3_0 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=3_2 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=0.6 , lowerCAmelCase__=None , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = mask_ratio
__SCREAMING_SNAKE_CASE = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TFViTMAEModel(config=__A)
__SCREAMING_SNAKE_CASE = model(__A , training=__A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(__A)
__SCREAMING_SNAKE_CASE = model(__A , training=__A)
# expected sequence length = num_patches
__SCREAMING_SNAKE_CASE = (self.image_size // self.patch_size) ** 2
__SCREAMING_SNAKE_CASE = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(__A)
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__SCREAMING_SNAKE_CASE = model(__A , training=__A)
__SCREAMING_SNAKE_CASE = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(__SCREAMING_SNAKE_CASE) = config_and_inputs
__SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__lowercase : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__lowercase : Tuple = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
__lowercase : Dict = False
__lowercase : Dict = False
__lowercase : str = False
__lowercase : List[str] = False
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFViTMAEModelTester(self)
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7)
def snake_case_ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""")
def snake_case_ ( self):
pass
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__A)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__A)
__SCREAMING_SNAKE_CASE = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A)
def snake_case_ ( self):
# make the mask reproducible
np.random.seed(2)
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2)
__SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__A)
__SCREAMING_SNAKE_CASE = self._prepare_for_class(__A , __A)
__SCREAMING_SNAKE_CASE = model(__A , noise=__A)
__SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(__A , __A))
__SCREAMING_SNAKE_CASE = model(**__A , noise=__A)
__SCREAMING_SNAKE_CASE = outputs_dict[0].numpy()
__SCREAMING_SNAKE_CASE = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1E-6)
def snake_case_ ( self):
# make the mask reproducible
np.random.seed(2)
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2)
__SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A):
__SCREAMING_SNAKE_CASE = v.numpy()
else:
__SCREAMING_SNAKE_CASE = np.array(__A)
return inputs_np_dict
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__A)
__SCREAMING_SNAKE_CASE = self._prepare_for_class(__A , __A)
__SCREAMING_SNAKE_CASE = prepare_numpy_arrays(__A)
__SCREAMING_SNAKE_CASE = model(__A , noise=__A)
__SCREAMING_SNAKE_CASE = model(**__A , noise=__A)
self.assert_outputs_same(__A , __A)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
# make masks reproducible
np.random.seed(2)
__SCREAMING_SNAKE_CASE = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
__SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__SCREAMING_SNAKE_CASE = tf.constant(__A)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__SCREAMING_SNAKE_CASE = tf_noise
super().check_pt_tf_models(__A , __A , __A)
def snake_case_ ( self):
# make mask reproducible
np.random.seed(2)
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(__A)
if module_member_name.endswith("""MainLayer""")
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""")] == model_class.__name__[: -len("""Model""")]
for module_member in (getattr(__A , __A),)
if isinstance(__A , __A)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A , """_keras_serializable""" , __A)
}
__SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2)
__SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(__A)
inputs_dict.update({"""noise""": noise})
for main_layer_class in tf_main_layer_classes:
__SCREAMING_SNAKE_CASE = main_layer_class(__A)
__SCREAMING_SNAKE_CASE = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
__SCREAMING_SNAKE_CASE = tf.keras.Model(__A , outputs=main_layer(__A))
__SCREAMING_SNAKE_CASE = model(__A)
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = os.path.join(__A , """keras_model.h5""")
model.save(__A)
__SCREAMING_SNAKE_CASE = tf.keras.models.load_model(
__A , custom_objects={main_layer_class.__name__: main_layer_class})
assert isinstance(__A , tf.keras.Model)
__SCREAMING_SNAKE_CASE = model(__A)
self.assert_outputs_same(__A , __A)
@slow
def snake_case_ ( self):
# make mask reproducible
np.random.seed(2)
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2)
__SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__A)
__SCREAMING_SNAKE_CASE = self._prepare_for_class(__A , __A)
__SCREAMING_SNAKE_CASE = model(__A , noise=__A)
if model_class.__name__ == "TFViTMAEModel":
__SCREAMING_SNAKE_CASE = outputs.last_hidden_state.numpy()
__SCREAMING_SNAKE_CASE = 0
else:
__SCREAMING_SNAKE_CASE = outputs.logits.numpy()
__SCREAMING_SNAKE_CASE = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A , saved_model=__A)
__SCREAMING_SNAKE_CASE = model_class.from_pretrained(__A)
__SCREAMING_SNAKE_CASE = model(__A , noise=__A)
if model_class.__name__ == "TFViTMAEModel":
__SCREAMING_SNAKE_CASE = after_outputs['''last_hidden_state'''].numpy()
__SCREAMING_SNAKE_CASE = 0
else:
__SCREAMING_SNAKE_CASE = after_outputs['''logits'''].numpy()
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__A , 1E-5)
def snake_case_ ( self):
# make mask reproducible
np.random.seed(2)
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2)
__SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__A)
__SCREAMING_SNAKE_CASE = self._prepare_for_class(__A , __A)
__SCREAMING_SNAKE_CASE = model(__A , noise=__A)
__SCREAMING_SNAKE_CASE = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A)
__SCREAMING_SNAKE_CASE = model_class.from_config(model.get_config())
# make sure it also accepts a normal config
__SCREAMING_SNAKE_CASE = model_class.from_config(model.config)
__SCREAMING_SNAKE_CASE = new_model(__A) # Build model
new_model.set_weights(model.get_weights())
__SCREAMING_SNAKE_CASE = new_model(__A , noise=__A)
self.assert_outputs_same(__A , __A)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""")
def snake_case_ ( self):
pass
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""")
self.assertIsNotNone(__A)
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""") if is_vision_available() else None
@slow
def snake_case_ ( self):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
__SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""")
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=__A , return_tensors="""tf""")
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__SCREAMING_SNAKE_CASE = ViTMAEConfig()
__SCREAMING_SNAKE_CASE = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
__SCREAMING_SNAKE_CASE = np.random.uniform(size=(1, num_patches))
# forward pass
__SCREAMING_SNAKE_CASE = model(**__A , noise=__A)
# verify the logits
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor([1, 1_9_6, 7_6_8])
self.assertEqual(outputs.logits.shape , __A)
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __A , atol=1E-4)
| 100 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 336 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 |
import numpy
# List of input, output pairs
_lowerCamelCase : Dict = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
_lowerCamelCase : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
_lowerCamelCase : Dict = [2, 4, 1, 5]
_lowerCamelCase : Dict = len(train_data)
_lowerCamelCase : int = 0.0_0_9
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]="train" ) -> Dict:
return calculate_hypothesis_value(UpperCAmelCase , UpperCAmelCase ) - output(
UpperCAmelCase , UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Any:
UpperCAmelCase : str = 0
for i in range(len(UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ) -> List[str]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : str=m ) -> Dict:
UpperCAmelCase : Optional[int] = 0
for i in range(UpperCAmelCase ):
if index == -1:
summation_value += _error(UpperCAmelCase )
else:
summation_value += _error(UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def a__ ( UpperCAmelCase : Dict ) -> Dict:
UpperCAmelCase : Dict = summation_of_cost_derivative(UpperCAmelCase , UpperCAmelCase ) / m
return cost_derivative_value
def a__ ( ) -> List[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase : List[str] = 0.000002
UpperCAmelCase : Any = 0
UpperCAmelCase : Dict = 0
while True:
j += 1
UpperCAmelCase : List[Any] = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase ) ):
UpperCAmelCase : List[str] = get_cost_derivative(i - 1 )
UpperCAmelCase : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase , rtol=UpperCAmelCase , ):
break
UpperCAmelCase : int = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ) -> List[Any]:
for i in range(len(UpperCAmelCase ) ):
print(('''Actual output value:''', output(UpperCAmelCase , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(UpperCAmelCase , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowercase__ ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = """vit_msn"""
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1e-06 , __snake_case=224 , __snake_case=16 , __snake_case=3 , __snake_case=True , **__snake_case , ):
super().__init__(**__A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
_SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
_SCREAMING_SNAKE_CASE : int = intermediate_size
_SCREAMING_SNAKE_CASE : str = hidden_act
_SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = initializer_range
_SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[int] = image_size
_SCREAMING_SNAKE_CASE : Tuple = patch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : List[str] = qkv_bias
| 200 |
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = len(UpperCAmelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
UpperCAmelCase : Optional[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase : Any = left
UpperCAmelCase : List[str] = point
elif point > right:
UpperCAmelCase : Any = right
UpperCAmelCase : List[str] = point
else:
if item < current_item:
UpperCAmelCase : Optional[int] = point - 1
else:
UpperCAmelCase : str = point + 1
return None
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif point > right:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , point + 1 , UpperCAmelCase )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> int:
if collection != sorted(UpperCAmelCase ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
_lowerCamelCase : Optional[int] = 0
if debug == 1:
_lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCamelCase : List[Any] = 6_7
_lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowercase__ = ""
lowercase__ = ""
lowercase__ = ""
lowercase__ = 1 # (0 is vertical, 1 is horizontal)
def __a ( ) ->None:
a__: str = get_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print('Processing...' )
a__: List[Any] = update_image_and_anno(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for index, image in enumerate(_SCREAMING_SNAKE_CASE ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a__: int = random_chars(32 )
a__: int = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
a__: List[str] = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(F'/{file_root}.jpg' , _SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'Success {index+1}/{len(_SCREAMING_SNAKE_CASE )} with {file_name}' )
a__: List[str] = []
for anno in new_annos[index]:
a__: str = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(_SCREAMING_SNAKE_CASE )
with open(F'/{file_root}.txt' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[list, list]:
a__: Tuple = []
a__: Union[str, Any] = []
for label_file in glob.glob(os.path.join(_SCREAMING_SNAKE_CASE , '*.txt' ) ):
a__: Optional[Any] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(_SCREAMING_SNAKE_CASE ) as in_file:
a__: Any = in_file.readlines()
a__: int = os.path.join(_SCREAMING_SNAKE_CASE , F'{label_name}.jpg' )
a__: str = []
for obj_list in obj_lists:
a__: str = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_SCREAMING_SNAKE_CASE )
labels.append(_SCREAMING_SNAKE_CASE )
return img_paths, labels
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 ) ->tuple[list, list, list]:
a__: Union[str, Any] = []
a__: List[str] = []
a__: Optional[Any] = []
for idx in range(len(_SCREAMING_SNAKE_CASE ) ):
a__: Dict = []
a__: int = img_list[idx]
path_list.append(_SCREAMING_SNAKE_CASE )
a__: int = anno_list[idx]
a__: Any = cva.imread(_SCREAMING_SNAKE_CASE )
if flip_type == 1:
a__: Any = cva.flip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for bbox in img_annos:
a__: Any = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
a__: int = cva.flip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for bbox in img_annos:
a__: str = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_SCREAMING_SNAKE_CASE )
new_imgs_list.append(_SCREAMING_SNAKE_CASE )
return new_imgs_list, new_annos_lists, path_list
def __a ( _SCREAMING_SNAKE_CASE = 32 ) ->str:
assert number_char > 1, "The number of character should greater than 1"
a__: Dict = ascii_lowercase + digits
return "".join(random.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 290 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else ''''''
UpperCAmelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any:
for i in range(config.num_hidden_layers ):
UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : str = q_bias
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
UpperCAmelCase : str = gamma_a
UpperCAmelCase : Dict = gamma_a
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : str = val
def a__ ( ) -> Optional[int]:
UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]:
UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase : List[Any] = 1_024
UpperCAmelCase : Optional[Any] = 4_096
UpperCAmelCase : Any = 24
UpperCAmelCase : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 16
UpperCAmelCase : List[Any] = '''huggingface/label-files'''
UpperCAmelCase : Any = '''rvlcdip-id2label.json'''
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = idalabel
UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
UpperCAmelCase : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase : str = encoding['''pixel_values''']
UpperCAmelCase : Any = model(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify logits
UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 | 0 |
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_A = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
def a__ ( lowerCAmelCase , lowerCAmelCase , ) -> Optional[int]:
import pyspark
def generate_fn():
UpperCAmelCase__ : Any = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
UpperCAmelCase__ : Optional[Any] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
UpperCAmelCase__ : Optional[Any] = partition_df.collect()
UpperCAmelCase__ : Optional[int] = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class lowerCamelCase ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=None , ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = df
UpperCAmelCase__ : Optional[int] = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCAmelCase__ : str = _generate_iterable_examples(self.df , self.partition_order )
def __iter__(self ):
"""simple docstring"""
yield from self.generate_examples_fn()
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__A )
return SparkExamplesIterable(self.df , partition_order=__A )
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = self.split_shard_indices_by_worker(__A , __A )
return SparkExamplesIterable(self.df , partition_order=__A )
@property
def _a (self ):
"""simple docstring"""
return len(self.partition_order )
class lowerCamelCase ( datasets.DatasetBuilder ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = SparkConfig
def __init__(self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
import pyspark
UpperCAmelCase__ : List[str] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCAmelCase__ : Optional[Any] = df
UpperCAmelCase__ : Tuple = working_dir
super().__init__(
cache_dir=__A , config_name=str(self.df.semanticHash() ) , **__A , )
def _a (self ):
"""simple docstring"""
def create_cache_and_write_probe(_lowerCamelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__A )
UpperCAmelCase__ : str = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__A , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCAmelCase__ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def _a (self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _a (self , _lowerCamelCase ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(_lowerCamelCase ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
UpperCAmelCase__ : List[str] = self.df.count()
UpperCAmelCase__ : Optional[int] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase__ : List[str] = (
self.df.limit(__A )
.repartition(1 )
.mapInArrow(__A , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase__ : Optional[int] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase__ : Tuple = min(__A , int(approx_total_size / max_shard_size ) )
UpperCAmelCase__ : Union[str, Any] = self.df.repartition(__A )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
"""simple docstring"""
import pyspark
UpperCAmelCase__ : Tuple = ParquetWriter if file_format == '''parquet''' else ArrowWriter
UpperCAmelCase__ : Dict = os.path.join(self._working_dir , os.path.basename(__A ) ) if self._working_dir else fpath
UpperCAmelCase__ : Optional[int] = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase__ : str = self.config.features
UpperCAmelCase__ : Tuple = self._writer_batch_size
UpperCAmelCase__ : int = self._fs.storage_options
def write_arrow(_lowerCamelCase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase__ : Any = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase__ : Tuple = next(__A , __A )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Optional[Any] = writer_class(
features=__A , path=working_fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , )
UpperCAmelCase__ : int = pa.Table.from_batches([first_batch] )
writer.write_table(__A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase__ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
UpperCAmelCase__ : int = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , )
UpperCAmelCase__ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(__A )
if writer._num_bytes > 0:
UpperCAmelCase__ : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__A ) ):
UpperCAmelCase__ : Any = os.path.join(os.path.dirname(__A ) , os.path.basename(__A ) )
shutil.move(__A , __A )
UpperCAmelCase__ : str = (
self.df.mapInArrow(__A , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a (self , _lowerCamelCase , _lowerCamelCase = "arrow" , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
self._validate_cache_dir()
UpperCAmelCase__ : List[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__A )
UpperCAmelCase__ : Dict = not is_remote_filesystem(self._fs )
UpperCAmelCase__ : Any = os.path.join if is_local else posixpath.join
UpperCAmelCase__ : int = '''-TTTTT-SSSSS-of-NNNNN'''
UpperCAmelCase__ : Optional[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
UpperCAmelCase__ : Tuple = path_join(self._output_dir , __A )
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(__A , __A , __A ):
(
UpperCAmelCase__
) : str = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__A )
UpperCAmelCase__ : List[str] = total_num_examples
UpperCAmelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
UpperCAmelCase__ : Dict = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase__ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
rename(
__A , fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , F"""{global_shard_id:05d}""" ).replace("""NNNNN""" , F"""{total_shards:05d}""" ) , )
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Tuple = 0
for i in range(len(__A ) ):
UpperCAmelCase__ : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(__A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__A , len(__A ) ).map(lambda _lowerCamelCase : _rename_shard(*__A ) ).collect()
else:
# don't use any pattern
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : int = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace(__A , """""" ) , )
def _a (self , _lowerCamelCase , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
| 171 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 0
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__A, __A )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = Path(__A ) / '''preprocessor_config.json'''
lowerCAmelCase_ = Path(__A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A )
self.assertIsInstance(__A, __A )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = Path(__A ) / '''preprocessor_config.json'''
lowerCAmelCase_ = Path(__A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A )
self.assertIsInstance(__A, __A )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowerCAmelCase_ = Path(__A ) / '''preprocessor_config.json'''
lowerCAmelCase_ = Path(__A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A ).to_dict()
config_dict.pop('''image_processor_type''' )
lowerCAmelCase_ = CLIPImageProcessor(**__A )
# save in new folder
model_config.save_pretrained(__A )
config.save_pretrained(__A )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A )
# make sure private variable is not incorrectly saved
lowerCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__A, __A )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = Path(__A ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A )
self.assertIsInstance(__A, __A )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__A, '''clip-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__A, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A, revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__A, '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''', ):
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with self.assertRaises(__A ):
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__A )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A, trust_remote_code=__A )
self.assertEqual(reloaded_image_processor.__class__.__name__, '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
try:
AutoConfig.register('''custom''', __A )
AutoImageProcessor.register(__A, __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoImageProcessor.register(__A, __A )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = Path(__A ) / '''preprocessor_config.json'''
lowerCAmelCase_ = Path(__A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) )
lowerCAmelCase_ = CustomImageProcessor.from_pretrained(__A )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__A )
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(__A )
self.assertIsInstance(__A, __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
class A ( lowerCamelCase__ ):
__snake_case = True
try:
AutoConfig.register('''custom''', __A )
AutoImageProcessor.register(__A, __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
self.assertTrue(not hasattr(__A, '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 278 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase : List[Any] = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def a__ ( UpperCAmelCase : List[str] ) -> Any:
with open(UpperCAmelCase , '''r''' ) as f:
UpperCAmelCase : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
super().__init__(**__A )
UpperCAmelCase : Tuple = load_vocab_file(__A )
UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Union[str, Any] = unk_token
UpperCAmelCase : Optional[Any] = cls_token
UpperCAmelCase : Optional[int] = pad_token
UpperCAmelCase : Optional[int] = mask_token
UpperCAmelCase : List[str] = eos_token
UpperCAmelCase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __magic_name__ ( self : Tuple, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : List[Any], __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
return text.split()
def __magic_name__ ( self : Optional[int], __A : Dict=False ):
return len(self._id_to_token )
def __magic_name__ ( self : int ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __magic_name__ ( self : Tuple, __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
mask += [0] * len(__A ) + [1]
return mask
def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__A, '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __magic_name__ ( self : Dict ):
return self.get_vocab_size(with_added_tokens=__A )
def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
return super()._add_tokens(__A, special_tokens=__A )
| 336 | 0 |
from __future__ import annotations
import math
__snake_case = "2020.9.26"
__snake_case = "xcodz-dot, cclaus, dhruvmanila"
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> tuple[float, float]:
"""simple docstring"""
if not all(isinstance(_lowercase , (float, int) ) for val in locals().values() ):
__UpperCamelCase = f'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(_lowercase )
__UpperCamelCase = ((x * distance) / (z + distance)) * scale
__UpperCamelCase = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> tuple[float, float, float]:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise TypeError('Axis must be a str' )
__UpperCamelCase = locals()
del input_variables["axis"]
if not all(isinstance(_lowercase , (float, int) ) for val in input_variables.values() ):
__UpperCamelCase = (
'''Input values except axis must either be float or int: '''
f'''{list(input_variables.values() )}'''
)
raise TypeError(_lowercase )
__UpperCamelCase = (angle % 3_60) / 4_50 * 1_80 / math.pi
if axis == "z":
__UpperCamelCase = x * math.cos(_lowercase ) - y * math.sin(_lowercase )
__UpperCamelCase = y * math.cos(_lowercase ) + x * math.sin(_lowercase )
__UpperCamelCase = z
elif axis == "x":
__UpperCamelCase = y * math.cos(_lowercase ) - z * math.sin(_lowercase )
__UpperCamelCase = z * math.cos(_lowercase ) + y * math.sin(_lowercase )
__UpperCamelCase = x
elif axis == "y":
__UpperCamelCase = x * math.cos(_lowercase ) - z * math.sin(_lowercase )
__UpperCamelCase = z * math.cos(_lowercase ) + x * math.sin(_lowercase )
__UpperCamelCase = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }""")
| 310 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
| 336 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
snake_case_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
snake_case_ = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def lowerCAmelCase__ ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
snake_case_ = object_detector(examples[0] , threshold=0.0 )
snake_case_ = len(__A )
self.assertGreater(__A , 0 )
self.assertEqual(
__A , [
{
"score": ANY(__A ),
"label": ANY(__A ),
"box": {"xmin": ANY(__A ), "ymin": ANY(__A ), "xmax": ANY(__A ), "ymax": ANY(__A )},
}
for i in range(__A )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowerCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
pass
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
snake_case_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
snake_case_ = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.7_235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
snake_case_ = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
[
{"score": 0.7_235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ = pipeline("zero-shot-object-detection" )
snake_case_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
snake_case_ = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
@require_torch
@slow
def lowerCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ = 0.2
snake_case_ = pipeline("zero-shot-object-detection" )
snake_case_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=__A , )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def lowerCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
snake_case_ = 2
snake_case_ = pipeline("zero-shot-object-detection" )
snake_case_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=__A , )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
| 159 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
| 336 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 17 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _lowercase :
"""simple docstring"""
A__ = None
A__ = None
A__ = None # sigma(t_i)
@classmethod
def lowerCAmelCase ( cls : Any ):
'''simple docstring'''
return cls()
@dataclass
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
A__ = 42
A__ = 42
A__ = 42
class _lowercase ( lowerCamelCase__ , lowerCamelCase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Optional[int] , __lowerCamelCase : float = 0.0_2 , __lowerCamelCase : float = 100 , __lowerCamelCase : float = 1.0_0_7 , __lowerCamelCase : float = 80 , __lowerCamelCase : float = 0.0_5 , __lowerCamelCase : float = 50 , ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return KarrasVeSchedulerState.create()
def lowerCAmelCase ( self : int , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : int , __lowerCamelCase : Tuple = () ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = jnp.arange(0 , __A )[::-1].copy()
lowerCamelCase__ : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A , schedule=jnp.array(__A , dtype=jnp.floataa ) , timesteps=__A , )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : random.KeyArray , ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase__ : int = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase__ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase__ : Union[str, Any] = random.split(__A , num=1 )
lowerCamelCase__ : List[str] = self.config.s_noise * random.normal(key=__A , shape=sample.shape )
lowerCamelCase__ : Tuple = sigma + gamma * sigma
lowerCamelCase__ : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : bool = True , ):
'''simple docstring'''
lowerCamelCase__ : int = sample_hat + sigma_hat * model_output
lowerCamelCase__ : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase__ : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A , derivative=__A , state=__A )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : bool = True , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = sample_prev + sigma_prev * model_output
lowerCamelCase__ : List[str] = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase__ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A , derivative=__A , state=__A )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError()
| 184 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
| 336 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
__a = ["""input_features"""]
def __init__( self : List[str] , UpperCamelCase : Union[str, Any]=80 , UpperCamelCase : Optional[int]=16_000 , UpperCamelCase : Optional[Any]=160 , UpperCamelCase : Tuple=30 , UpperCamelCase : Dict=400 , UpperCamelCase : Any=0.0 , UpperCamelCase : Any=False , **UpperCamelCase : Dict , ):
'''simple docstring'''
super().__init__(
feature_size=__A , sampling_rate=__A , padding_value=__A , return_attention_mask=__A , **__A , )
__UpperCAmelCase : List[str] = n_fft
__UpperCAmelCase : Union[str, Any] = hop_length
__UpperCAmelCase : Tuple = chunk_length
__UpperCAmelCase : Union[str, Any] = chunk_length * sampling_rate
__UpperCAmelCase : List[Any] = self.n_samples // hop_length
__UpperCAmelCase : Union[str, Any] = sampling_rate
__UpperCAmelCase : str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__A , norm="""slaney""" , mel_scale="""slaney""" , )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : np.array ):
'''simple docstring'''
__UpperCAmelCase : Tuple = spectrogram(
__A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
__UpperCAmelCase : Optional[Any] = log_spec[:, :-1]
__UpperCAmelCase : Union[str, Any] = np.maximum(__A , log_spec.max() - 8.0 )
__UpperCAmelCase : Tuple = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCamelCase__ ( UpperCamelCase : List[np.ndarray] , UpperCamelCase : List[np.ndarray] , UpperCamelCase : float = 0.0 ):
'''simple docstring'''
if attention_mask is not None:
__UpperCAmelCase : Union[str, Any] = np.array(__A , np.intaa )
__UpperCAmelCase : int = []
for vector, length in zip(__A , attention_mask.sum(-1 ) ):
__UpperCAmelCase : int = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__UpperCAmelCase : int = padding_value
normed_input_values.append(__A )
else:
__UpperCAmelCase : Any = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[Any] , UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase : bool = True , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[str] = "max_length" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__UpperCAmelCase : List[str] = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__UpperCAmelCase : List[str] = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase : Optional[int] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
__UpperCAmelCase : Optional[Any] = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase : List[Any] = [np.asarray([raw_speech] ).T]
__UpperCAmelCase : Any = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
__UpperCAmelCase : Union[str, Any] = self.pad(
__A , padding=__A , max_length=max_length if max_length else self.n_samples , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__UpperCAmelCase : Any = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
__UpperCAmelCase : Optional[Any] = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
__UpperCAmelCase : Optional[Any] = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
__UpperCAmelCase : Dict = [self._np_extract_fbank_features(__A ) for waveform in input_features[0]]
if isinstance(input_features[0] , __A ):
__UpperCAmelCase : Dict = [np.asarray(__A , dtype=np.floataa ) for feature in input_features]
else:
__UpperCAmelCase : List[str] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__UpperCAmelCase : int = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
__UpperCAmelCase : Optional[int] = padded_inputs.convert_to_tensors(__A )
return padded_inputs
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 115 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCAmelCase ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def a__ ( ) -> Dict:
if os.name == "nt":
UpperCAmelCase : List[str] = CursorInfo()
UpperCAmelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def a__ ( ) -> Optional[int]:
if os.name == "nt":
UpperCAmelCase : int = CursorInfo()
UpperCAmelCase : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Any = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def a__ ( ) -> Optional[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
| 336 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__lowercase : int = XLNetTokenizer
__lowercase : Optional[int] = XLNetTokenizerFast
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
def snake_case_ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = XLNetTokenizer(__A , keep_accents=__A)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = '''<s>'''
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A) , __A)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A) , __A)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """<eod>""")
self.assertEqual(len(__A) , 1_0_0_6)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(__A , keep_accents=__A)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__A)
self.assertListEqual(__A , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__A)
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(__A , do_lower_case=__A)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(__A , do_lower_case=__A)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=__A)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__A)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__A , __A)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def snake_case_ ( self):
# fmt: off
__SCREAMING_SNAKE_CASE = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 100 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Dict:
lowercase__ : Optional[Any] = tempfile.mkdtemp()
lowercase__ : List[Any] = BlipImageProcessor()
lowercase__ : Union[str, Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
lowercase__ : Optional[int] = BlipProcessor(__A , __A )
processor.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : str , **lowercase_ : str ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).tokenizer
def __UpperCamelCase ( self : List[Any] , **lowercase_ : str ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).image_processor
def __UpperCamelCase ( self : Tuple ) -> Any:
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : str ) -> Dict:
lowercase__ : List[str] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : Tuple = self.get_image_processor(do_normalize=__A , padding_value=1.0 )
lowercase__ : Union[str, Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : int = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = BlipProcessor(tokenizer=__A , image_processor=__A )
lowercase__ : List[str] = self.prepare_image_inputs()
lowercase__ : Optional[Any] = image_processor(__A , return_tensors="np" )
lowercase__ : Tuple = processor(images=__A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : int = BlipProcessor(tokenizer=__A , image_processor=__A )
lowercase__ : Optional[int] = '''lower newer'''
lowercase__ : Any = processor(text=__A )
lowercase__ : Optional[int] = tokenizer(__A , return_token_type_ids=__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : int = BlipProcessor(tokenizer=__A , image_processor=__A )
lowercase__ : Any = '''lower newer'''
lowercase__ : Optional[int] = self.prepare_image_inputs()
lowercase__ : Tuple = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def __UpperCamelCase ( self : Tuple ) -> Tuple:
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Dict = BlipProcessor(tokenizer=__A , image_processor=__A )
lowercase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : List[str] = processor.batch_decode(__A )
lowercase__ : int = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
lowercase__ : Any = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : str = BlipProcessor(tokenizer=__A , image_processor=__A )
lowercase__ : Dict = '''lower newer'''
lowercase__ : Optional[int] = self.prepare_image_inputs()
lowercase__ : Optional[Any] = processor(text=__A , images=__A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 87 |
from __future__ import annotations
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
UpperCAmelCase : str = number_of_bytes // partitions
UpperCAmelCase : Dict = []
for i in range(UpperCAmelCase ):
UpperCAmelCase : int = i * bytes_per_partition + 1
UpperCAmelCase : Optional[int] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self , __snake_case ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
_SCREAMING_SNAKE_CASE : Optional[Any] = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(__A )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = '''sshleifer/tiny-gpt2'''
_SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__A , multi_process=__A , )
_SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmark(__A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''sgugger/tiny-distilbert-classification'''
_SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , only_pretrain_model=__A , )
_SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(__A )
_SCREAMING_SNAKE_CASE : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''sshleifer/tiny-gpt2'''
_SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmark(__A )
_SCREAMING_SNAKE_CASE : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = '''sshleifer/tiny-gpt2'''
_SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(__A )
_SCREAMING_SNAKE_CASE : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__A , multi_process=__A , )
_SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmark(__A , [config] )
_SCREAMING_SNAKE_CASE : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = '''sshleifer/tiny-gpt2'''
_SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(__A )
_SCREAMING_SNAKE_CASE : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmark(__A , [config] )
_SCREAMING_SNAKE_CASE : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = '''sshleifer/tiny-gpt2'''
_SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmark(__A )
_SCREAMING_SNAKE_CASE : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = '''sshleifer/tiny-gpt2'''
_SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(__A )
_SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_SCREAMING_SNAKE_CASE : int = TensorFlowBenchmark(__A , [config] )
_SCREAMING_SNAKE_CASE : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(__A )
_SCREAMING_SNAKE_CASE : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_SCREAMING_SNAKE_CASE : List[Any] = TensorFlowBenchmark(__A , configs=[config] )
_SCREAMING_SNAKE_CASE : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = '''sshleifer/tiny-gpt2'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__A , multi_process=__A , )
_SCREAMING_SNAKE_CASE : str = TensorFlowBenchmark(__A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__A , save_to_csv=__A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__A , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__A , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__A , """env.csv""" ) , multi_process=__A , )
_SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmark(__A )
benchmark.run()
self.assertTrue(Path(os.path.join(__A , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__A , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__A , """env.csv""" ) ).exists() )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(__snake_case ):
self.assertTrue(hasattr(__A , """sequential""" ) )
self.assertTrue(hasattr(__A , """cumulative""" ) )
self.assertTrue(hasattr(__A , """current""" ) )
self.assertTrue(hasattr(__A , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__A , """log.txt""" ) , log_print=__A , trace_memory_line_by_line=__A , eager_mode=__A , multi_process=__A , )
_SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmark(__A )
_SCREAMING_SNAKE_CASE : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__A , """log.txt""" ) ).exists() )
| 200 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 | 0 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE ) ->int:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
a__: Dict = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
# remove and rename some keys of load the original model
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_A = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 171 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ):
for k, v in d.items():
UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k
if v and isinstance(UpperCAmelCase , UpperCAmelCase ):
yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __UpperCamelCase ( _A ):
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def __UpperCamelCase ( ):
lowerCAmelCase_ = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_A )
lowerCAmelCase_ = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_A )
EnvironmentCommand.register_subcommand(_A )
TestCommand.register_subcommand(_A )
RunBeamCommand.register_subcommand(_A )
DummyDataCommand.register_subcommand(_A )
# Parse args
lowerCAmelCase_ = parser.parse_known_args()
if not hasattr(_A , '''func''' ):
parser.print_help()
exit(1 )
lowerCAmelCase_ = parse_unknown_args(_A )
# Run
lowerCAmelCase_ = args.func(_A , **_A )
service.run()
if __name__ == "__main__":
main()
| 278 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
| 336 | 0 |
from __future__ import annotations
import math
def _A ( _lowercase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__snake_case = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
__UpperCamelCase = []
for num in range(len(_lowercase ) ):
__UpperCamelCase = 0
while 2 * i * i <= odd_composites[num]:
__UpperCamelCase = odd_composites[num] - 2 * i * i
if is_prime(_lowercase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(_lowercase ) == n:
return list_nums
return []
def _A ( ) -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 | 0 |
from __future__ import annotations
SCREAMING_SNAKE_CASE :Dict = [True] * 1_00_00_01
SCREAMING_SNAKE_CASE :List[Any] = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
SCREAMING_SNAKE_CASE :List[Any] = False
i += 1
def _lowerCAmelCase ( lowerCAmelCase_ :int )->bool:
'''simple docstring'''
return seive[n]
def _lowerCAmelCase ( lowerCAmelCase_ :int )->bool:
'''simple docstring'''
return any(digit in "02468" for digit in str(lowerCAmelCase_ ) )
def _lowerCAmelCase ( lowerCAmelCase_ :int = 1_000_000 )->list[int]:
'''simple docstring'''
snake_case_ = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(lowerCAmelCase_ ) and not contains_an_even_digit(lowerCAmelCase_ ):
snake_case_ = str(lowerCAmelCase_ )
snake_case_ = [int(str_num[j:] + str_num[:j] ) for j in range(len(lowerCAmelCase_ ) )]
if all(is_prime(lowerCAmelCase_ ) for i in list_nums ):
result.append(lowerCAmelCase_ )
return result
def _lowerCAmelCase ( )->int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
| 159 |
def a__ ( UpperCAmelCase : int ) -> int:
UpperCAmelCase : list[list[int]] = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , UpperCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowercase ( self : Union[str, Any] ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__lowercase = '''xvjiarui/stable-diffusion-2-inpainting'''
__lowercase = FlaxStableDiffusionInpaintPipeline.from_pretrained(__A, safety_checker=__A )
__lowercase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = num_samples * [init_image]
__lowercase = num_samples * [mask_image]
__lowercase = pipeline.prepare_inputs(__A, __A, __A )
# shard inputs and rng
__lowercase = replicate(__A )
__lowercase = jax.random.split(__A, jax.device_count() )
__lowercase = shard(__A )
__lowercase = shard(__A )
__lowercase = shard(__A )
__lowercase = pipeline(
__A, __A, __A, __A, __A, __A, jit=__A )
__lowercase = output.images.reshape(__A, 5_1_2, 5_1_2, 3 )
__lowercase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array(
[0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 17 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Union[str, Any] = len(UpperCAmelCase )
# We need to create solution object to save path.
UpperCAmelCase : int = [[0 for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )]
UpperCAmelCase : Union[str, Any] = run_maze(UpperCAmelCase , 0 , 0 , UpperCAmelCase )
if solved:
print('''\n'''.join(str(UpperCAmelCase ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Dict = len(UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
UpperCAmelCase : Dict = 1
return True
UpperCAmelCase : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
UpperCAmelCase : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
UpperCAmelCase : Any = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCAmelCase : str = 1
# check for directions
if (
run_maze(UpperCAmelCase , i + 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j + 1 , UpperCAmelCase )
or run_maze(UpperCAmelCase , i - 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j - 1 , UpperCAmelCase )
):
return True
UpperCAmelCase : Any = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
def lowercase_ ( _A : int ):
"""simple docstring"""
if number > 0:
raise ValueError("input must be a negative integer" )
lowerCamelCase__ : int = len(bin(_A )[3:] )
lowerCamelCase__ : List[str] = bin(abs(_A ) - (1 << binary_number_length) )[3:]
lowerCamelCase__ : str = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_A ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
| 336 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {"vocab_file": "spiece.model"}
UpperCAmelCase : Dict = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase : Any = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
UpperCAmelCase : List[Any] = "▁"
class lowerCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ["""input_ids""", """attention_mask"""]
def __init__( self : int , UpperCamelCase : Any , UpperCamelCase : Any="</s>" , UpperCamelCase : List[str]="<unk>" , UpperCamelCase : Dict="<pad>" , UpperCamelCase : List[Any]=100 , UpperCamelCase : Any=None , UpperCamelCase : Optional[Dict[str, Any]] = None , UpperCamelCase : List[str]=True , **UpperCamelCase : Tuple , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase : Any = [f'''<extra_id_{i}>''' for i in range(__A )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCAmelCase : Union[str, Any] = len(set(filter(lambda UpperCamelCase : bool("""extra_id""" in str(__A ) ) , __A ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
__UpperCAmelCase : List[Any] = legacy
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__A , unk_token=__A , pad_token=__A , extra_ids=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , legacy=__A , **__A , )
__UpperCAmelCase : Union[str, Any] = vocab_file
__UpperCAmelCase : str = extra_ids
__UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
@staticmethod
def lowerCamelCase__ ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__UpperCAmelCase : int = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __A , )
return max_model_length
@property
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : str , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__A )) + [1]
return ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase : bool(re.search(R"""<extra_id_\d+>""" , __A ) ) is not None , self.additional_special_tokens ) ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return [self._convert_token_to_id(__A ) for token in self.get_sentinel_tokens()]
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[int] ):
'''simple docstring'''
if len(__A ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self._add_eos_if_not_present(__A )
if token_ids_a is None:
return token_ids_a
else:
__UpperCAmelCase : Union[str, Any] = self._add_eos_if_not_present(__A )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.__dict__.copy()
__UpperCAmelCase : List[Any] = None
return state
def __setstate__( self : List[str] , UpperCamelCase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : "TextInput" , **UpperCamelCase : List[str] ):
'''simple docstring'''
if not self.legacy:
__UpperCAmelCase : int = SPIECE_UNDERLINE + text.replace(__A , """ """ )
return super().tokenize(__A , **__A )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Tuple , **UpperCamelCase : Any ):
'''simple docstring'''
if not self.legacy:
__UpperCAmelCase : int = text.startswith(__A )
if is_first:
__UpperCAmelCase : str = text[1:]
__UpperCAmelCase : str = self.sp_model.encode(__A , out_type=__A )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(__A ):
__UpperCAmelCase : Optional[Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Dict ):
'''simple docstring'''
if token.startswith("""<extra_id_""" ):
__UpperCAmelCase : Union[str, Any] = re.match(R"""<extra_id_(\d+)>""" , __A )
__UpperCAmelCase : Optional[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__A )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
__UpperCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(__A )
else:
__UpperCAmelCase : Tuple = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : str = []
__UpperCAmelCase : Any = ''''''
__UpperCAmelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : int = []
else:
current_sub_tokens.append(__A )
__UpperCAmelCase : Any = False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Optional[Any] = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
__UpperCAmelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
| 115 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
UpperCAmelCase : str = [randint(-1_000 , 1_000 ) for i in range(10 )]
UpperCAmelCase : Any = randint(-5_000 , 5_000 )
return (arr, r)
_lowerCamelCase : Any = make_dataset()
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, ...]:
for triplet in permutations(UpperCAmelCase , 3 ):
if sum(UpperCAmelCase ) == target:
return tuple(sorted(UpperCAmelCase ) )
return (0, 0, 0)
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, int, int]:
arr.sort()
UpperCAmelCase : Tuple = len(UpperCAmelCase )
for i in range(n - 1 ):
UpperCAmelCase , UpperCAmelCase : int = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
UpperCAmelCase : Tuple = '''
triplet_sum1(*dataset)
'''
UpperCAmelCase : List[str] = '''
triplet_sum2(*dataset)
'''
UpperCAmelCase : Tuple = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
UpperCAmelCase : str = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
return (min(UpperCAmelCase ), min(UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : int = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Optional[int] = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : int = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : str = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Optional[Any] = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Tuple = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : str = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Optional[int] = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Union[str, Any] = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Dict = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : str = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : List[str] = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Optional[int] = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
__lowercase : Union[str, Any] = ['''flax''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""flax"""])
| 100 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 336 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 |
import numpy
# List of input, output pairs
_lowerCamelCase : Dict = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
_lowerCamelCase : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
_lowerCamelCase : Dict = [2, 4, 1, 5]
_lowerCamelCase : Dict = len(train_data)
_lowerCamelCase : int = 0.0_0_9
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]="train" ) -> Dict:
return calculate_hypothesis_value(UpperCAmelCase , UpperCAmelCase ) - output(
UpperCAmelCase , UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Any:
UpperCAmelCase : str = 0
for i in range(len(UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ) -> List[str]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : str=m ) -> Dict:
UpperCAmelCase : Optional[int] = 0
for i in range(UpperCAmelCase ):
if index == -1:
summation_value += _error(UpperCAmelCase )
else:
summation_value += _error(UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def a__ ( UpperCAmelCase : Dict ) -> Dict:
UpperCAmelCase : Dict = summation_of_cost_derivative(UpperCAmelCase , UpperCAmelCase ) / m
return cost_derivative_value
def a__ ( ) -> List[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase : List[str] = 0.000002
UpperCAmelCase : Any = 0
UpperCAmelCase : Dict = 0
while True:
j += 1
UpperCAmelCase : List[Any] = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase ) ):
UpperCAmelCase : List[str] = get_cost_derivative(i - 1 )
UpperCAmelCase : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase , rtol=UpperCAmelCase , ):
break
UpperCAmelCase : int = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ) -> List[Any]:
for i in range(len(UpperCAmelCase ) ):
print(('''Actual output value:''', output(UpperCAmelCase , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(UpperCAmelCase , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = emb.weight.shape
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : str = emb.weight.data
return lin_layer
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE : int = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
_SCREAMING_SNAKE_CASE : Dict = mam_aaa['''model''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : List[str] = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_SCREAMING_SNAKE_CASE : Optional[int] = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
_SCREAMING_SNAKE_CASE : List[str] = state_dict['''decoder.embed_tokens.weight''']
_SCREAMING_SNAKE_CASE : Dict = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : int = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
UpperCAmelCase_ : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 200 |
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = len(UpperCAmelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
UpperCAmelCase : Optional[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase : Any = left
UpperCAmelCase : List[str] = point
elif point > right:
UpperCAmelCase : Any = right
UpperCAmelCase : List[str] = point
else:
if item < current_item:
UpperCAmelCase : Optional[int] = point - 1
else:
UpperCAmelCase : str = point + 1
return None
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif point > right:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , point + 1 , UpperCAmelCase )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> int:
if collection != sorted(UpperCAmelCase ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
_lowerCamelCase : Optional[int] = 0
if debug == 1:
_lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCamelCase : List[Any] = 6_7
_lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336 | 0 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowercase__ = HfApi()
lowercase__ = {}
# fmt: off
lowercase__ = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowercase__ = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowercase__ = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowercase__ = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowercase__ = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowercase__ = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowercase__ = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowercase__ = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowercase__ = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowercase__ = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowercase__ = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowercase__ = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowercase__ = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowercase__ = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowercase__ = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
lowercase__ = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowercase__ = "/home/patrick/google_checkpoints/" + mod.modelId.split('/')[-1]
print(f"Started running {mod.modelId}!!!")
if mod.modelId.startswith('CompVis'):
lowercase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
lowercase__ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowercase__ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowercase__ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowercase__ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(f"{mod.modelId} has passed successfully!!!")
| 290 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else ''''''
UpperCAmelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any:
for i in range(config.num_hidden_layers ):
UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : str = q_bias
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
UpperCAmelCase : str = gamma_a
UpperCAmelCase : Dict = gamma_a
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : str = val
def a__ ( ) -> Optional[int]:
UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]:
UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase : List[Any] = 1_024
UpperCAmelCase : Optional[Any] = 4_096
UpperCAmelCase : Any = 24
UpperCAmelCase : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 16
UpperCAmelCase : List[Any] = '''huggingface/label-files'''
UpperCAmelCase : Any = '''rvlcdip-id2label.json'''
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = idalabel
UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
UpperCAmelCase : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase : str = encoding['''pixel_values''']
UpperCAmelCase : Any = model(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify logits
UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 | 0 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = str(id_ )
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Any = {} # {vertex:distance}
def __lt__(self , _lowerCamelCase ):
"""simple docstring"""
return self.key < other.key
def __repr__(self ):
"""simple docstring"""
return self.id
def _a (self , _lowerCamelCase ):
"""simple docstring"""
self.neighbors.append(__A )
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = weight
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowerCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> list:
UpperCAmelCase__ : Optional[Any] = []
for u in graph:
UpperCAmelCase__ : int = math.inf
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : List[Any] = graph[:]
while q:
UpperCAmelCase__ : int = min(lowerCAmelCase )
q.remove(lowerCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase__ : Tuple = u
UpperCAmelCase__ : List[str] = u.edges[v.id]
for i in range(1 , len(lowerCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Iterator[tuple]:
for u in graph:
UpperCAmelCase__ : Optional[Any] = math.inf
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Tuple = list(lowerCAmelCase )
hq.heapify(lowerCAmelCase )
while h:
UpperCAmelCase__ : int = hq.heappop(lowerCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase__ : int = u
UpperCAmelCase__ : Any = u.edges[v.id]
hq.heapify(lowerCAmelCase )
for i in range(1 , len(lowerCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def a__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 171 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 0 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def __UpperCamelCase ( _A , _A , _A , _A ):
lowerCAmelCase_ = s.rsplit(_A , _A )
return new.join(_A )
def __UpperCamelCase ( _A ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCAmelCase_ = key.replace(f"{group_key}." , f"{group_key}.group." )
if "res_path" in key:
lowerCAmelCase_ = key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
lowerCAmelCase_ = rreplace(_A , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
lowerCAmelCase_ = rreplace(_A , '''.b''' , '''.bias''' , 1 )
lowerCAmelCase_ = value.float()
return upgrade
@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A=None , _A=True ):
from dall_e import Encoder
lowerCAmelCase_ = Encoder()
if os.path.exists(_A ):
lowerCAmelCase_ = torch.load(_A )
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A )
if isinstance(_A , _A ):
lowerCAmelCase_ = ckpt.state_dict()
encoder.load_state_dict(_A )
if config_path is not None:
lowerCAmelCase_ = FlavaImageCodebookConfig.from_pretrained(_A )
else:
lowerCAmelCase_ = FlavaImageCodebookConfig()
lowerCAmelCase_ = FlavaImageCodebook(_A ).eval()
lowerCAmelCase_ = encoder.state_dict()
lowerCAmelCase_ = upgrade_state_dict(_A )
hf_model.load_state_dict(_A )
lowerCAmelCase_ = hf_model.state_dict()
lowerCAmelCase_ = count_parameters(_A )
lowerCAmelCase_ = count_parameters(_A )
assert torch.allclose(_A , _A , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(_A )
else:
return hf_state_dict
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_A = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 278 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase : List[Any] = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def a__ ( UpperCAmelCase : List[str] ) -> Any:
with open(UpperCAmelCase , '''r''' ) as f:
UpperCAmelCase : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
super().__init__(**__A )
UpperCAmelCase : Tuple = load_vocab_file(__A )
UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Union[str, Any] = unk_token
UpperCAmelCase : Optional[Any] = cls_token
UpperCAmelCase : Optional[int] = pad_token
UpperCAmelCase : Optional[int] = mask_token
UpperCAmelCase : List[str] = eos_token
UpperCAmelCase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __magic_name__ ( self : Tuple, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : List[Any], __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
return text.split()
def __magic_name__ ( self : Optional[int], __A : Dict=False ):
return len(self._id_to_token )
def __magic_name__ ( self : int ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __magic_name__ ( self : Tuple, __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
mask += [0] * len(__A ) + [1]
return mask
def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__A, '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __magic_name__ ( self : Dict ):
return self.get_vocab_size(with_added_tokens=__A )
def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
return super()._add_tokens(__A, special_tokens=__A )
| 336 | 0 |
__snake_case = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 310 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
| 336 | 0 |
from math import factorial, radians
def _lowerCAmelCase ( lowerCAmelCase_ :float , lowerCAmelCase_ :int = 18 , lowerCAmelCase_ :int = 10 )->float:
'''simple docstring'''
snake_case_ = angle_in_degrees - ((angle_in_degrees // 3_6_0.0) * 3_6_0.0)
# Converting from degrees to radians
snake_case_ = radians(lowerCAmelCase_ )
snake_case_ = angle_in_radians
snake_case_ = 3
snake_case_ = -1
for _ in range(lowerCAmelCase_ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase_ )
snake_case_ = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 159 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
_a = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_a = BASE_URL + "/user"
# https://github.com/settings/tokens
_a = os.environ.get('USER_TOKEN', '')
def _A ( UpperCamelCase_ : str) -> dict[Any, Any]:
'''simple docstring'''
__lowercase = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(UpperCamelCase_, headers=UpperCamelCase_).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 17 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
A : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase__)
class _lowercase :
"""simple docstring"""
A__ = 42
A__ = 42
A__ = None
A__ = None
A__ = None
@dataclass(frozen=lowerCamelCase__)
class _lowercase :
"""simple docstring"""
A__ = 42
A__ = None
A__ = None
A__ = None
A__ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
A__ = 42
def __init__( self : int , __lowerCamelCase : str , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : str , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : bool = False , ):
'''simple docstring'''
lowerCamelCase__ : int = hans_processors[task]()
lowerCamelCase__ : List[str] = os.path.join(
__A , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__A ) , __A , ) , )
lowerCamelCase__ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ : List[Any] = label_list[2], label_list[1]
lowerCamelCase__ : str = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : List[str] = cached_features_file + '''.lock'''
with FileLock(__A ):
if os.path.exists(__A ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
lowerCamelCase__ : List[Any] = torch.load(__A )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
lowerCamelCase__ : List[str] = (
processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A )
)
logger.info("Training examples: %s" , len(__A ) )
lowerCamelCase__ : int = hans_convert_examples_to_features(__A , __A , __A , __A )
logger.info("Saving features into cached file %s" , __A )
torch.save(self.features , __A )
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Union[str, Any] , __lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.features[i]
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _lowercase :
"""simple docstring"""
A__ = 42
def __init__( self : str , __lowerCamelCase : str , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : str , __lowerCamelCase : Optional[int] = 128 , __lowerCamelCase : List[Any]=False , __lowerCamelCase : bool = False , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = hans_processors[task]()
lowerCamelCase__ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ : Dict = label_list[2], label_list[1]
lowerCamelCase__ : Tuple = label_list
lowerCamelCase__ : Union[str, Any] = processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A )
lowerCamelCase__ : List[Any] = hans_convert_examples_to_features(__A , __A , __A , __A )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__A )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase__ : str = tf.data.Dataset.from_generator(
__A , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.dataset
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , __lowerCamelCase : Any ):
'''simple docstring'''
return self.features[i]
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return self.label_list
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def lowerCAmelCase ( self : Dict , __lowerCamelCase : str ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__A , "heuristics_train_set.txt" ) ) , "train" )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__A , "heuristics_evaluation_set.txt" ) ) , "dev" )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : str = []
for i, line in enumerate(__A ):
if i == 0:
continue
lowerCamelCase__ : List[str] = '''%s-%s''' % (set_type, line[0])
lowerCamelCase__ : str = line[5]
lowerCamelCase__ : Optional[Any] = line[6]
lowerCamelCase__ : Tuple = line[7][2:] if line[7].startswith("ex" ) else line[7]
lowerCamelCase__ : Dict = line[0]
examples.append(InputExample(guid=__A , text_a=__A , text_b=__A , label=__A , pairID=__A ) )
return examples
def lowercase_ ( _A : List[InputExample] , _A : List[str] , _A : int , _A : PreTrainedTokenizer , ):
"""simple docstring"""
lowerCamelCase__ : str = {label: i for i, label in enumerate(_A )}
lowerCamelCase__ : List[str] = []
for ex_index, example in tqdm.tqdm(enumerate(_A ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index) )
lowerCamelCase__ : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_A , max_length=_A , padding="max_length" , truncation=_A , return_overflowing_tokens=_A , )
lowerCamelCase__ : List[Any] = label_map[example.label] if example.label in label_map else 0
lowerCamelCase__ : Dict = int(example.pairID )
features.append(InputFeatures(**_A , label=_A , pairID=_A ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(F"guid: {example}" )
logger.info(F"features: {features[i]}" )
return features
A : List[str] = {
"hans": 3,
}
A : int = {
"hans": HansProcessor,
}
| 184 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
| 336 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __SCREAMING_SNAKE_CASE :
A : int
A : Node | None = None
A : Node | None = None
def __lowercase ( ) ->Node | None:
"""simple docstring"""
lowercase : Optional[Any] = Node(1 )
lowercase : Union[str, Any] = Node(2 )
lowercase : Tuple = Node(3 )
lowercase : Optional[Any] = Node(4 )
lowercase : Optional[int] = Node(5 )
return tree
def __lowercase ( _UpperCamelCase ) ->list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __lowercase ( _UpperCamelCase ) ->list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __lowercase ( _UpperCamelCase ) ->list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def __lowercase ( _UpperCamelCase ) ->Sequence[Node | None]:
"""simple docstring"""
lowercase : list[Any] = []
if root is None:
return output
lowercase : Dict = deque([root] )
while process_queue:
lowercase : Dict = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Sequence[Node | None]:
"""simple docstring"""
lowercase : list[Any] = []
def populate_output(_UpperCamelCase, _UpperCamelCase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left, level - 1 )
populate_output(root.right, level - 1 )
populate_output(_UpperCamelCase, _UpperCamelCase )
return output
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Sequence[Node | None]:
"""simple docstring"""
lowercase : list[Any] = []
def populate_output(_UpperCamelCase, _UpperCamelCase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right, level - 1 )
populate_output(root.left, level - 1 )
populate_output(_UpperCamelCase, _UpperCamelCase )
return output
def __lowercase ( _UpperCamelCase ) ->Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase : list[Sequence[Node | None]] = []
lowercase : List[str] = 0
lowercase : Any = height(_UpperCamelCase )
for h in range(1, height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_UpperCamelCase, _UpperCamelCase ) )
lowercase : Optional[int] = 1
else:
output.append(get_nodes_from_right_to_left(_UpperCamelCase, _UpperCamelCase ) )
lowercase : Optional[Any] = 0
return output
def __lowercase ( ) ->None: # Main function for testing.
"""simple docstring"""
lowercase : Optional[Any] = make_tree()
print(f"""In-order Traversal: {inorder(_UpperCamelCase )}""" )
print(f"""Pre-order Traversal: {preorder(_UpperCamelCase )}""" )
print(f"""Post-order Traversal: {postorder(_UpperCamelCase )}""", '''\n''' )
print(f"""Height of Tree: {height(_UpperCamelCase )}""", '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(_UpperCamelCase ), '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1, height(_UpperCamelCase ) + 1 ):
print(f"""Level {level}:""", get_nodes_from_left_to_right(_UpperCamelCase, level=_UpperCamelCase ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(_UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 337 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __SCREAMING_SNAKE_CASE ( pl.LightningModule ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__()
lowercase : Any = model
lowercase : Optional[Any] = 2
lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __lowerCamelCase ( self ):
pass
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase )
lowercase : int = LightningModel(_UpperCamelCase )
lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCamelCase )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 337 | 1 |
from __future__ import annotations
from typing import Any
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
if not postfix_notation:
return 0
lowercase : List[str] = {'''+''', '''-''', '''*''', '''/'''}
lowercase : list[Any] = []
for token in postfix_notation:
if token in operations:
lowercase , lowercase : Union[str, Any] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_UpperCamelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = 'yolos'
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : Dict = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : str = image_size
lowercase : Dict = patch_size
lowercase : str = num_channels
lowercase : Optional[int] = qkv_bias
lowercase : List[str] = num_detection_tokens
lowercase : List[str] = use_mid_position_embeddings
lowercase : Dict = auxiliary_loss
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : Any = bbox_cost
lowercase : int = giou_cost
# Loss coefficients
lowercase : Dict = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return 12
| 337 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__a = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase : Optional[int] = torch.manual_seed(0 )
lowercase : int = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE__ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = generator.manual_seed(0 )
lowercase : Tuple = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE__ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : str = '''cyberpunk 2077'''
lowercase : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase : Optional[int] = torch.manual_seed(0 )
lowercase : List[Any] = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowercase : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase : Optional[int] = '''A painting of a squirrel eating a burger '''
lowercase : int = torch.manual_seed(0 )
lowercase : int = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase : Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : List[str] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase : Tuple = pipe.image_variation(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='''numpy''' ).images
lowercase : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[int] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 337 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None:
"""simple docstring"""
lowercase : List[Any] = f"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ):
lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None
else:
lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f""" got {requirement}""" )
lowercase , lowercase : str = match[0]
lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements
lowercase : List[Any] = {}
for w in want_range:
lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f""" but got {requirement}""" )
lowercase , lowercase : Optional[int] = match[0]
lowercase : Dict = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
return
# check if any version is installed
try:
lowercase : List[str] = importlib.metadata.version(_UpperCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_UpperCamelCase, _UpperCamelCase )
| 337 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'deta'
A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = backbone_config
lowercase : Union[str, Any] = num_queries
lowercase : Any = max_position_embeddings
lowercase : int = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : Optional[Any] = decoder_ffn_dim
lowercase : Optional[int] = decoder_layers
lowercase : int = decoder_attention_heads
lowercase : Any = dropout
lowercase : int = attention_dropout
lowercase : Dict = activation_dropout
lowercase : int = activation_function
lowercase : Dict = init_std
lowercase : List[str] = init_xavier_std
lowercase : Optional[Any] = encoder_layerdrop
lowercase : Tuple = auxiliary_loss
lowercase : Tuple = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : Tuple = encoder_n_points
lowercase : Optional[int] = decoder_n_points
lowercase : Tuple = two_stage
lowercase : Optional[Any] = two_stage_num_proposals
lowercase : Union[str, Any] = with_box_refine
lowercase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : str = bbox_cost
lowercase : List[Any] = giou_cost
# Loss coefficients
lowercase : Tuple = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : Dict = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
lowercase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
| 337 | 1 |
from functools import lru_cache
def __lowercase ( _UpperCamelCase ) ->set:
"""simple docstring"""
lowercase : Optional[int] = 2
lowercase : Optional[Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_UpperCamelCase )
if n > 1:
factors.add(_UpperCamelCase )
return factors
@lru_cache
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
return len(unique_prime_factors(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase ) ->bool:
"""simple docstring"""
return len(set(_UpperCamelCase ) ) in (0, 1)
def __lowercase ( _UpperCamelCase ) ->list:
"""simple docstring"""
lowercase : List[Any] = 2
while True:
# Increment each value of a generated range
lowercase : Dict = [base + i for i in range(_UpperCamelCase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
lowercase : str = [upf_len(_UpperCamelCase ) for x in group]
checker.append(_UpperCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_UpperCamelCase ):
return group
# Increment our base variable by 1
base += 1
def __lowercase ( _UpperCamelCase = 4 ) ->int:
"""simple docstring"""
lowercase : Dict = run(_UpperCamelCase )
return results[0] if len(_UpperCamelCase ) else None
if __name__ == "__main__":
print(solution())
| 337 |
def __lowercase ( ) ->List[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = 0
for i in range(1, 1001 ):
total += i**i
return str(_UpperCamelCase )[-10:]
if __name__ == "__main__":
print(solution())
| 337 | 1 |
def __lowercase ( _UpperCamelCase ) ->bool:
"""simple docstring"""
return str(_UpperCamelCase ) == str(_UpperCamelCase )[::-1]
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
return int(_UpperCamelCase ) + int(str(_UpperCamelCase )[::-1] )
def __lowercase ( _UpperCamelCase = 10000 ) ->int:
"""simple docstring"""
lowercase : List[str] = []
for num in range(1, _UpperCamelCase ):
lowercase : Dict = 0
lowercase : Tuple = num
while iterations < 50:
lowercase : List[Any] = sum_reverse(_UpperCamelCase )
iterations += 1
if is_palindrome(_UpperCamelCase ):
break
else:
lychrel_nums.append(_UpperCamelCase )
return len(_UpperCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with a really long name
lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
| 337 | 1 |
__a = '''Input must be a string of 8 numbers plus letter'''
__a = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def __lowercase ( _UpperCamelCase ) ->bool:
"""simple docstring"""
if not isinstance(_UpperCamelCase, _UpperCamelCase ):
lowercase : Optional[Any] = f"""Expected string as input, found {type(_UpperCamelCase ).__name__}"""
raise TypeError(_UpperCamelCase )
lowercase : Optional[int] = spanish_id.replace('''-''', '''''' ).upper()
if len(_UpperCamelCase ) != 9:
raise ValueError(_UpperCamelCase )
try:
lowercase : Optional[Any] = int(spanish_id_clean[0:8] )
lowercase : Tuple = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(_UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1
lowercase : List[Any] = n
lowercase : List[Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # adjacency matrix for weight
lowercase : Union[str, Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = w
def __lowerCamelCase ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.dp[u][v]
if __name__ == "__main__":
__a = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 1 |
import os
def __lowercase ( _UpperCamelCase = "matrix.txt" ) ->int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(_UpperCamelCase ), _UpperCamelCase ) ) as in_file:
lowercase : List[Any] = in_file.read()
lowercase : str = [[int(_UpperCamelCase ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
lowercase : Union[str, Any] = [[0 for cell in row] for row in grid]
lowercase : Any = len(grid[0] )
lowercase : Dict = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
lowercase : str = grid[0][0]
for i in range(1, _UpperCamelCase ):
lowercase : str = grid[0][i] + dp[0][i - 1]
for i in range(1, _UpperCamelCase ):
lowercase : List[Any] = grid[i][0] + dp[i - 1][0]
for i in range(1, _UpperCamelCase ):
for j in range(1, _UpperCamelCase ):
lowercase : Tuple = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar
__a = TypeVar('''_T''')
class __SCREAMING_SNAKE_CASE ( Generic[_T] ):
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
lowercase : list[_T] = list(iterable or [] )
lowercase : list[_T] = []
def __len__( self ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self ):
return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
self._stacka.append(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = self._stacka.pop
lowercase : List[Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('''Queue is empty''' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 337 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 337 | 1 |
import sys
from collections import defaultdict
class __SCREAMING_SNAKE_CASE :
def __init__( self ):
lowercase : Optional[int] = []
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.node_position[vertex]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = pos
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowercase : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowercase : str = 2 * start + 1
else:
lowercase : Dict = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowercase , lowercase : List[str] = heap[smallest_child], positions[smallest_child]
lowercase , lowercase : Optional[Any] = (
heap[start],
positions[start],
)
lowercase , lowercase : List[Any] = temp, tempa
lowercase : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , SCREAMING_SNAKE_CASE__ )
self.top_to_bottom(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = position[index]
while index != 0:
lowercase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowercase : Tuple = heap[parent]
lowercase : Tuple = position[parent]
self.set_position(position[parent] , SCREAMING_SNAKE_CASE__ )
else:
lowercase : int = val
lowercase : Any = temp
self.set_position(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
break
lowercase : Optional[int] = parent
else:
lowercase : str = val
lowercase : Union[str, Any] = temp
self.set_position(SCREAMING_SNAKE_CASE__ , 0 )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = len(SCREAMING_SNAKE_CASE__ ) // 2 - 1
for i in range(SCREAMING_SNAKE_CASE__ , -1 , -1 ):
self.top_to_bottom(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = positions[0]
lowercase : Optional[Any] = sys.maxsize
self.top_to_bottom(SCREAMING_SNAKE_CASE__ , 0 , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
return temp
def __lowercase ( _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
lowercase : Optional[Any] = Heap()
lowercase : List[str] = [0] * len(_UpperCamelCase )
lowercase : Dict = [-1] * len(_UpperCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowercase : str = [] # Heap of Distance of vertices from their neighboring vertex
lowercase : List[Any] = []
for vertex in range(len(_UpperCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_UpperCamelCase )
heap.node_position.append(_UpperCamelCase )
lowercase : Dict = []
lowercase : Any = 1
lowercase : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowercase : Dict = 0
lowercase : Union[str, Any] = distance
heap.heapify(_UpperCamelCase, _UpperCamelCase )
for _ in range(1, len(_UpperCamelCase ) ):
lowercase : List[Any] = heap.delete_minimum(_UpperCamelCase, _UpperCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowercase : int = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_UpperCamelCase )]
):
lowercase : Dict = distance
heap.bottom_to_top(
_UpperCamelCase, heap.get_position(_UpperCamelCase ), _UpperCamelCase, _UpperCamelCase )
lowercase : Union[str, Any] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__a = int(input('''Enter number of edges: ''').strip())
__a = defaultdict(list)
for _ in range(edges_number):
__a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 337 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase ) ->List[int]:
"""simple docstring"""
if isinstance(_UpperCamelCase, np.ndarray ):
return list(tensor.shape )
lowercase : Optional[Any] = tf.shape(_UpperCamelCase )
if tensor.shape == tf.TensorShape(_UpperCamelCase ):
return dynamic
lowercase : Tuple = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase : int = [1] * inputs.shape.rank
lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis]
lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase )
lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase : List[str] = tf.nn.batch_normalization(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, )
return outputs
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase : Dict = tf.shape(_UpperCamelCase )
lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
return tf.reshape(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(_UpperCamelCase, tf.Tensor ):
lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase : str = (
tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
lowercase : Any = np.asarray(_UpperCamelCase )
lowercase : List[Any] = 1
lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_UpperCamelCase ):
lowercase : Optional[int] = chunk_data
else:
lowercase : int = data
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
if name in group.attrs:
lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]]
else:
lowercase : Optional[Any] = []
lowercase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
def _expand_single_ad_tensor(_UpperCamelCase ):
if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_UpperCamelCase, axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
| 337 | 1 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : Tuple = (boundary[1] - boundary[0]) / steps
lowercase : List[str] = boundary[0]
lowercase : Any = boundary[1]
lowercase : Union[str, Any] = make_points(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
lowercase : Optional[int] = 0.0
y += (h / 2.0) * f(_UpperCamelCase )
for i in x_i:
# print(i)
y += h * f(_UpperCamelCase )
y += (h / 2.0) * f(_UpperCamelCase )
return y
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : Any = a + h
while x < (b - h):
yield x
lowercase : Any = x + h
def __lowercase ( _UpperCamelCase ) ->Optional[Any]: # enter your function here
"""simple docstring"""
lowercase : Optional[int] = (x - 0) * (x - 0)
return y
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : List[str] = 0.0 # Lower bound of integration
lowercase : List[Any] = 1.0 # Upper bound of integration
lowercase : int = 1_0.0 # define number of steps or resolution
lowercase : Optional[int] = [a, b] # define boundary of integration
lowercase : Optional[Any] = method_a(_UpperCamelCase, _UpperCamelCase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 337 |
def __lowercase ( _UpperCamelCase = 4000000 ) ->int:
"""simple docstring"""
lowercase : int = []
lowercase , lowercase : str = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCamelCase )
lowercase , lowercase : Dict = b, a + b
return sum(_UpperCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
for param in module.parameters():
lowercase : Optional[int] = False
def __lowercase ( ) ->Tuple:
"""simple docstring"""
lowercase : Any = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : str = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __lowercase ( _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : List[str] = plt.imshow(_UpperCamelCase )
fig.axes.get_xaxis().set_visible(_UpperCamelCase )
fig.axes.get_yaxis().set_visible(_UpperCamelCase )
plt.show()
def __lowercase ( ) ->Optional[int]:
"""simple docstring"""
lowercase : Tuple = datetime.now()
lowercase : str = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 337 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 337 | 1 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->list[int]:
"""simple docstring"""
if len(_UpperCamelCase ) == 0:
return array
lowercase , lowercase : Union[str, Any] = min(_UpperCamelCase ), max(_UpperCamelCase )
# Compute the variables
lowercase : Any = _max - _min + 1
lowercase , lowercase : Optional[Any] = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
lowercase : Dict = i - _min
lowercase : List[Any] = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
lowercase : Any = 0
for i in range(_UpperCamelCase ):
while holes_repeat[i] > 0:
lowercase : Any = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = input('''Enter numbers separated by comma:\n''')
__a = [int(x) for x in user_input.split(''',''')]
print(pigeon_sort(unsorted))
| 337 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowercase ( _UpperCamelCase = 8 ) ->str:
"""simple docstring"""
lowercase : List[str] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
i -= len(_UpperCamelCase )
lowercase : Dict = i // 3
lowercase : List[str] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowercase : Union[str, Any] = (
chars_incl
+ random(_UpperCamelCase, quotient + remainder )
+ random(_UpperCamelCase, _UpperCamelCase )
+ random(_UpperCamelCase, _UpperCamelCase )
)
lowercase : Union[str, Any] = list(_UpperCamelCase )
shuffle(_UpperCamelCase )
return "".join(_UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool:
"""simple docstring"""
if len(_UpperCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
lowercase : str = any(char in ascii_uppercase for char in password )
lowercase : List[str] = any(char in ascii_lowercase for char in password )
lowercase : Dict = any(char in digits for char in password )
lowercase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def __lowercase ( ) ->Dict:
"""simple docstring"""
lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() )
lowercase : Optional[Any] = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''', password_generator(_UpperCamelCase ) )
print(
'''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), )
print('''[If you are thinking of using this passsword, You better save it.]''' )
if __name__ == "__main__":
main()
| 337 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
A : Tuple = StableUnCLIPPipeline
A : Dict = TEXT_TO_IMAGE_PARAMS
A : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
A : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
A : Union[str, Any] = False
def __lowerCamelCase ( self ):
lowercase : Dict = 32
lowercase : List[str] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowercase : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE__ , projection_dim=SCREAMING_SNAKE_CASE__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase : Optional[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=SCREAMING_SNAKE_CASE__ , num_layers=1 , )
torch.manual_seed(0 )
lowercase : Union[str, Any] = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
lowercase : List[Any] = StableUnCLIPImageNormalizer(embedding_dim=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowercase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowercase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase : int = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=SCREAMING_SNAKE_CASE__ , layers_per_block=1 , upcast_attention=SCREAMING_SNAKE_CASE__ , use_linear_projection=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
lowercase : Union[str, Any] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase : Dict = AutoencoderKL()
lowercase : int = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
lowercase : str = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowercase : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self ):
lowercase : Optional[int] = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Optional[int] = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=SCREAMING_SNAKE_CASE__ )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
lowercase : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
lowercase : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase : List[Any] = pipe('''anime turle''' , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' )
lowercase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Tuple = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
lowercase : int = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase : Optional[int] = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
lowercase : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 337 |
from __future__ import annotations
__a = []
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool:
"""simple docstring"""
for i in range(len(_UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(_UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool:
"""simple docstring"""
if row >= len(_UpperCamelCase ):
solution.append(_UpperCamelCase )
printboard(_UpperCamelCase )
print()
return True
for i in range(len(_UpperCamelCase ) ):
if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
lowercase : int = 1
solve(_UpperCamelCase, row + 1 )
lowercase : Tuple = 0
return False
def __lowercase ( _UpperCamelCase ) ->None:
"""simple docstring"""
for i in range(len(_UpperCamelCase ) ):
for j in range(len(_UpperCamelCase ) ):
if board[i][j] == 1:
print('''Q''', end=''' ''' )
else:
print('''.''', end=''' ''' )
print()
# n=int(input("The no. of queens"))
__a = 8
__a = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 337 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__a = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
lowercase : Optional[int] = getattr(_UpperCamelCase, _UpperCamelCase )
if weight_type is not None:
lowercase : str = getattr(_UpperCamelCase, _UpperCamelCase ).shape
else:
lowercase : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase : Any = value
elif weight_type == "weight_g":
lowercase : int = value
elif weight_type == "weight_v":
lowercase : Tuple = value
elif weight_type == "bias":
lowercase : List[str] = value
else:
lowercase : Union[str, Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Dict = []
lowercase : Any = fairseq_model.state_dict()
lowercase : Union[str, Any] = hf_model.feature_extractor
lowercase : str = hf_model.adapter
for name, value in fairseq_dict.items():
lowercase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, hf_model.config.feat_extract_norm == '''group''', )
lowercase : int = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Union[str, Any] = True
if "*" in mapped_key:
lowercase : Optional[Any] = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
lowercase : Any = mapped_key.replace('''*''', _UpperCamelCase )
if "weight_g" in name:
lowercase : Optional[int] = '''weight_g'''
elif "weight_v" in name:
lowercase : List[str] = '''weight_v'''
elif "bias" in name:
lowercase : int = '''bias'''
elif "weight" in name:
lowercase : List[Any] = '''weight'''
else:
lowercase : Union[str, Any] = None
set_recursively(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
lowercase : Dict = name.split('''.''' )
lowercase : str = int(items[0] )
lowercase : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Dict:
"""simple docstring"""
lowercase : List[Any] = full_name.split('''adaptor.''' )[-1]
lowercase : str = name.split('''.''' )
if items[1].isdigit():
lowercase : List[Any] = int(items[1] )
else:
lowercase : int = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
lowercase : Optional[int] = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
lowercase : Dict = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
lowercase : Any = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
lowercase : Optional[int] = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(_UpperCamelCase, _UpperCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
lowercase : List[str] = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
lowercase : List[str] = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase , lowercase : Any = emb.weight.shape
lowercase : int = nn.Linear(_UpperCamelCase, _UpperCamelCase, bias=_UpperCamelCase )
lowercase : Optional[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, ) ->Optional[Any]:
"""simple docstring"""
lowercase : Optional[Any] = WavaVecaConfig.from_pretrained(
_UpperCamelCase, add_adapter=_UpperCamelCase, adapter_stride=_UpperCamelCase, adapter_kernel_size=_UpperCamelCase, use_auth_token=_UpperCamelCase, output_hidden_size=_UpperCamelCase, )
lowercase : int = MBartConfig.from_pretrained(_UpperCamelCase )
# load model
lowercase , lowercase , lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
}, )
lowercase : List[Any] = model[0].eval()
# load feature extractor
lowercase : int = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase, use_auth_token=_UpperCamelCase )
# set weights for wav2vec2 encoder
lowercase : Dict = WavaVecaModel(_UpperCamelCase )
recursively_load_weights_wavaveca(model.encoder, _UpperCamelCase )
# load decoder weights
lowercase : Optional[Any] = MBartForCausalLM(_UpperCamelCase )
lowercase , lowercase : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=_UpperCamelCase )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
lowercase : Any = SpeechEncoderDecoderModel(encoder=_UpperCamelCase, decoder=_UpperCamelCase )
lowercase : List[Any] = False
lowercase : str = MBartaaTokenizer(_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
lowercase : str = hf_wavavec.config.to_dict()
lowercase : str = tokenizer.pad_token_id
lowercase : str = tokenizer.bos_token_id
lowercase : Any = tokenizer.eos_token_id
lowercase : Optional[Any] = '''mbart50'''
lowercase : Optional[int] = '''wav2vec2'''
lowercase : Optional[Any] = tokenizer.eos_token_id
lowercase : str = 250004
lowercase : str = tokenizer.eos_token_id
lowercase : str = SpeechEncoderDecoderConfig.from_dict(_UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
feature_extractor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=10_24, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_00_04, type=int, help='''`decoder_start_token_id` of model config''')
__a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 337 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : List[str] = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
f"""{test_file} instead.""" )
lowercase : Optional[int] = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
lowercase : List[Any] = components[:-1] + [test_fn.replace('''.py''', '''''' )]
lowercase : Union[str, Any] = '''.'''.join(_UpperCamelCase )
return test_module_path
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Any = get_module_path(_UpperCamelCase )
lowercase : Any = importlib.import_module(_UpperCamelCase )
return test_module
def __lowercase ( _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
lowercase : Optional[int] = []
lowercase : List[Any] = get_test_module(_UpperCamelCase )
for attr in dir(_UpperCamelCase ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(_UpperCamelCase, _UpperCamelCase ) )
# sort with class names
return sorted(_UpperCamelCase, key=lambda _UpperCamelCase : x.__name__ )
def __lowercase ( _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : Optional[int] = []
lowercase : Optional[Any] = get_test_module(_UpperCamelCase )
for attr in dir(_UpperCamelCase ):
lowercase : Optional[int] = getattr(_UpperCamelCase, _UpperCamelCase )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowercase : List[str] = getattr(_UpperCamelCase, '''all_model_classes''', [] )
if len(_UpperCamelCase ) > 0:
test_classes.append(_UpperCamelCase )
# sort with class names
return sorted(_UpperCamelCase, key=lambda _UpperCamelCase : x.__name__ )
def __lowercase ( _UpperCamelCase ) ->Tuple:
"""simple docstring"""
lowercase : int = get_test_classes(_UpperCamelCase )
lowercase : str = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_UpperCamelCase, key=lambda _UpperCamelCase : x.__name__ )
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : Optional[int] = test_class()
if hasattr(_UpperCamelCase, '''setUp''' ):
test.setUp()
lowercase : str = None
if hasattr(_UpperCamelCase, '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowercase : str = test.model_tester.__class__
return model_tester
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : Any = get_test_classes(_UpperCamelCase )
lowercase : Optional[Any] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_UpperCamelCase )
# sort with class names
return sorted(_UpperCamelCase, key=lambda _UpperCamelCase : x.__name__ )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : List[str] = get_test_classes_for_model(_UpperCamelCase, _UpperCamelCase )
lowercase : List[str] = []
for test_class in test_classes:
lowercase : str = get_model_tester_from_test_class(_UpperCamelCase )
if tester_class is not None:
tester_classes.append(_UpperCamelCase )
# sort with class names
return sorted(_UpperCamelCase, key=lambda _UpperCamelCase : x.__name__ )
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : int = get_test_classes(_UpperCamelCase )
lowercase : List[str] = {test_class: get_model_tester_from_test_class(_UpperCamelCase ) for test_class in test_classes}
return test_tester_mapping
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : Any = get_model_classes(_UpperCamelCase )
lowercase : Dict = {
model_class: get_test_classes_for_model(_UpperCamelCase, _UpperCamelCase ) for model_class in model_classes
}
return model_test_mapping
def __lowercase ( _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : str = get_model_classes(_UpperCamelCase )
lowercase : List[str] = {
model_class: get_tester_classes_for_model(_UpperCamelCase, _UpperCamelCase ) for model_class in model_classes
}
return model_to_tester_mapping
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
if isinstance(_UpperCamelCase, _UpperCamelCase ):
return o
elif isinstance(_UpperCamelCase, _UpperCamelCase ):
return o.__name__
elif isinstance(_UpperCamelCase, (list, tuple) ):
return [to_json(_UpperCamelCase ) for x in o]
elif isinstance(_UpperCamelCase, _UpperCamelCase ):
return {to_json(_UpperCamelCase ): to_json(_UpperCamelCase ) for k, v in o.items()}
else:
return o
| 337 |
from collections.abc import Callable
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
# Stores actual heap items.
lowercase : list = []
# Stores indexes of each item for supporting updates and deletion.
lowercase : dict = {}
# Stores current size of heap.
lowercase : str = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowercase , lowercase : int = self.arr[j], self.arr[i]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : int = self._left(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = i
if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = left
if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = right
return valid_parent
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ )
while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
while valid_parent != index:
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : str = self.pos_map[item]
lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : List[str] = self.pos_map[item]
del self.pos_map[item]
lowercase : Optional[int] = self.arr[self.size - 1]
lowercase : int = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] )
else:
lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )]
lowercase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
lowercase : str = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = (UnCLIPScheduler,)
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , prev_timestep=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = self.scheduler_classes[0]
lowercase : List[str] = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowercase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5
def __lowerCamelCase ( self ):
lowercase : Any = self.scheduler_classes[0]
lowercase : Any = self.get_scheduler_config(variance_type='''learned_range''' )
lowercase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=SCREAMING_SNAKE_CASE__ ) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=SCREAMING_SNAKE_CASE__ ) - -5.7998052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=SCREAMING_SNAKE_CASE__ ) - -0.0010011 < 1E-5
def __lowerCamelCase ( self ):
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : int = self.get_scheduler_config()
lowercase : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = scheduler.timesteps
lowercase : List[Any] = self.dummy_model()
lowercase : Dict = self.dummy_sample_deter
lowercase : str = torch.manual_seed(0 )
for i, t in enumerate(SCREAMING_SNAKE_CASE__ ):
# 1. predict noise residual
lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# 2. predict previous mean of sample x_t-1
lowercase : Tuple = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
lowercase : Optional[Any] = pred_prev_sample
lowercase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowercase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 252.2682495 ) < 1E-2
assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def __lowerCamelCase ( self ):
lowercase : str = self.scheduler_classes[0]
lowercase : Dict = self.get_scheduler_config()
lowercase : int = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(25 )
lowercase : Union[str, Any] = scheduler.timesteps
lowercase : int = self.dummy_model()
lowercase : str = self.dummy_sample_deter
lowercase : str = torch.manual_seed(0 )
for i, t in enumerate(SCREAMING_SNAKE_CASE__ ):
# 1. predict noise residual
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if i + 1 == timesteps.shape[0]:
lowercase : int = None
else:
lowercase : Any = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase : int = scheduler.step(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prev_timestep=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
lowercase : int = pred_prev_sample
lowercase : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowercase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 258.2044983 ) < 1E-2
assert abs(result_mean.item() - 0.3362038 ) < 1E-3
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
pass
| 337 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
__a = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
__a = {
'''ctrl''': 2_56,
}
__a = {
'''Pregnancy''': 16_86_29,
'''Christianity''': 76_75,
'''Explain''': 10_64_23,
'''Fitness''': 6_34_40,
'''Saving''': 6_31_63,
'''Ask''': 2_71_71,
'''Ass''': 9_59_85,
'''Joke''': 16_35_09,
'''Questions''': 4_56_22,
'''Thoughts''': 4_96_05,
'''Retail''': 5_23_42,
'''Feminism''': 16_43_38,
'''Writing''': 1_19_92,
'''Atheism''': 19_22_63,
'''Netflix''': 4_86_16,
'''Computing''': 3_96_39,
'''Opinion''': 4_32_13,
'''Alone''': 4_49_67,
'''Funny''': 5_89_17,
'''Gaming''': 4_03_58,
'''Human''': 40_88,
'''India''': 13_31,
'''Joker''': 7_71_38,
'''Diet''': 3_62_06,
'''Legal''': 1_18_59,
'''Norman''': 49_39,
'''Tip''': 7_26_89,
'''Weight''': 5_23_43,
'''Movies''': 4_62_73,
'''Running''': 2_34_25,
'''Science''': 20_90,
'''Horror''': 3_77_93,
'''Confession''': 6_05_72,
'''Finance''': 1_22_50,
'''Politics''': 1_63_60,
'''Scary''': 19_19_85,
'''Support''': 1_26_54,
'''Technologies''': 3_25_16,
'''Teenage''': 6_61_60,
'''Event''': 3_27_69,
'''Learned''': 6_74_60,
'''Notion''': 18_27_70,
'''Wikipedia''': 3_75_83,
'''Books''': 66_65,
'''Extract''': 7_60_50,
'''Confessions''': 10_27_01,
'''Conspiracy''': 7_59_32,
'''Links''': 6_36_74,
'''Narcissus''': 15_04_25,
'''Relationship''': 5_47_66,
'''Relationships''': 13_47_96,
'''Reviews''': 4_16_71,
'''News''': 42_56,
'''Translation''': 2_68_20,
'''multilingual''': 12_84_06,
}
def __lowercase ( _UpperCamelCase ) ->Tuple:
"""simple docstring"""
lowercase : int = set()
lowercase : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase : List[Any] = char
lowercase : int = set(_UpperCamelCase )
return pairs
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Optional[int] = VOCAB_FILES_NAMES
A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Dict = CONTROL_CODES
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , **SCREAMING_SNAKE_CASE__ ):
super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle:
lowercase : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[int] = [tuple(merge.split() ) for merge in merges]
lowercase : int = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowercase : Tuple = {}
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if token in self.cache:
return self.cache[token]
lowercase : str = tuple(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
while True:
lowercase : int = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Tuple = bigram
lowercase : List[Any] = []
lowercase : Optional[int] = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
lowercase : List[str] = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : List[Any] = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] = tuple(SCREAMING_SNAKE_CASE__ )
lowercase : str = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
lowercase : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ )
lowercase : str = '''@@ '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = word[:-4]
lowercase : List[Any] = word
return word
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
lowercase : List[str] = re.findall(r'''\S+\n?''' , SCREAMING_SNAKE_CASE__ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) )
return split_tokens
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Dict = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' )
lowercase : str = 0
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Union[str, Any] = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 337 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 337 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Tuple = HfArgumentParser(_UpperCamelCase )
lowercase : List[str] = parser.parse_args_into_dataclasses()[0]
lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase )
try:
lowercase : Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] )
lowercase : Any = ''''''
lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] )
lowercase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase )
raise ValueError(_UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 337 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__a = logging.getLogger(__name__)
__a = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __SCREAMING_SNAKE_CASE :
A : Optional[str] = field(
default=A__ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
A : Optional[str] = field(
default=A__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(A__ )} , )
A : Optional[str] = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A : Optional[str] = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A : Optional[str] = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
A : Optional[str] = field(
default=A__ , metadata={'help': 'The input training data file (a text file).'} )
A : Optional[str] = field(
default=A__ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
A : Optional[str] = field(
default=A__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
A : Optional[str] = field(
default=A__ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
A : Optional[str] = field(
default=A__ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
A : bool = field(
default=A__ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
A : bool = field(
default=A__ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
A : bool = field(default=A__ , metadata={'help': 'Whether ot not to use whole word mask.'} )
A : float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
A : float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
A : int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
A : int = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
A : bool = field(
default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = False, _UpperCamelCase = None, ) ->Optional[int]:
"""simple docstring"""
def _dataset(_UpperCamelCase, _UpperCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=_UpperCamelCase, file_path=_UpperCamelCase, block_size=args.block_size, ref_path=_UpperCamelCase, )
return LineByLineTextDataset(tokenizer=_UpperCamelCase, file_path=_UpperCamelCase, block_size=args.block_size )
else:
return TextDataset(
tokenizer=_UpperCamelCase, file_path=_UpperCamelCase, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=_UpperCamelCase, )
if evaluate:
return _dataset(args.eval_data_file, args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_UpperCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file, args.train_ref_file )
def __lowercase ( ) ->Any:
"""simple docstring"""
lowercase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase , lowercase , lowercase : Tuple = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowercase : Dict = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase : Any = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
else:
lowercase : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
lowercase : List[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
lowercase : Optional[int] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, )
else:
logger.info('''Training new model from scratch''' )
lowercase : List[str] = AutoModelWithLMHead.from_config(_UpperCamelCase )
model.resize_token_embeddings(len(_UpperCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
lowercase : Union[str, Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowercase : str = min(data_args.block_size, tokenizer.max_len )
# Get datasets
lowercase : Optional[Any] = (
get_dataset(_UpperCamelCase, tokenizer=_UpperCamelCase, cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowercase : int = (
get_dataset(_UpperCamelCase, tokenizer=_UpperCamelCase, evaluate=_UpperCamelCase, cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowercase : str = DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCamelCase, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
else:
if data_args.mlm and data_args.whole_word_mask:
lowercase : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=_UpperCamelCase, mlm_probability=data_args.mlm_probability )
else:
lowercase : Optional[int] = DataCollatorForLanguageModeling(
tokenizer=_UpperCamelCase, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowercase : Tuple = Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, data_collator=_UpperCamelCase, train_dataset=_UpperCamelCase, eval_dataset=_UpperCamelCase, prediction_loss_only=_UpperCamelCase, )
# Training
if training_args.do_train:
lowercase : List[Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_UpperCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase : Optional[Any] = trainer.evaluate()
lowercase : Optional[int] = math.exp(eval_output['''eval_loss'''] )
lowercase : Tuple = {'''perplexity''': perplexity}
lowercase : Optional[int] = os.path.join(training_args.output_dir, '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_UpperCamelCase, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''', _UpperCamelCase, str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_UpperCamelCase )
return results
def __lowercase ( _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 337 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = [False] * len(_UpperCamelCase )
lowercase : Optional[int] = []
queue.append(_UpperCamelCase )
lowercase : Union[str, Any] = True
while queue:
lowercase : List[str] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCamelCase )
lowercase : Tuple = True
lowercase : Optional[Any] = u
return visited[t]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : List[str] = [-1] * (len(_UpperCamelCase ))
lowercase : int = 0
while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
lowercase : List[str] = float('''Inf''' )
lowercase : int = sink
while s != source:
# Find the minimum value in select path
lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] )
lowercase : Union[str, Any] = parent[s]
max_flow += path_flow
lowercase : Optional[int] = sink
while v != source:
lowercase : Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Union[str, Any] = parent[v]
return max_flow
__a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__a , __a = 0, 5
print(ford_fulkerson(graph, source, sink))
| 337 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
A : torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE__ = 32 , SCREAMING_SNAKE_CASE__ = 64 , SCREAMING_SNAKE_CASE__ = 20 , SCREAMING_SNAKE_CASE__ = 768 , SCREAMING_SNAKE_CASE__=77 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "silu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "linear" , SCREAMING_SNAKE_CASE__ = "prd" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
super().__init__()
lowercase : Dict = num_attention_heads
lowercase : int = attention_head_dim
lowercase : Union[str, Any] = num_attention_heads * attention_head_dim
lowercase : str = additional_embeddings
lowercase : Optional[int] = time_embed_dim or inner_dim
lowercase : Union[str, Any] = embedding_proj_dim or embedding_dim
lowercase : Dict = clip_embed_dim or embedding_dim
lowercase : List[Any] = Timesteps(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0 )
lowercase : Optional[Any] = TimestepEmbedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , out_dim=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if embedding_proj_norm_type is None:
lowercase : List[Any] = None
elif embedding_proj_norm_type == "layer":
lowercase : Any = nn.LayerNorm(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
lowercase : str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if encoder_hid_proj_type is None:
lowercase : str = None
elif encoder_hid_proj_type == "linear":
lowercase : Tuple = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
lowercase : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , SCREAMING_SNAKE_CASE__ ) )
if added_emb_type == "prd":
lowercase : Optional[Any] = nn.Parameter(torch.zeros(1 , 1 , SCREAMING_SNAKE_CASE__ ) )
elif added_emb_type is None:
lowercase : List[str] = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
lowercase : int = nn.ModuleList(
[
BasicTransformerBlock(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn='''gelu''' , attention_bias=SCREAMING_SNAKE_CASE__ , )
for d in range(SCREAMING_SNAKE_CASE__ )
] )
if norm_in_type == "layer":
lowercase : Dict = nn.LayerNorm(SCREAMING_SNAKE_CASE__ )
elif norm_in_type is None:
lowercase : List[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
lowercase : List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ )
lowercase : str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
lowercase : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , SCREAMING_SNAKE_CASE__ , persistent=SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE__ ) )
lowercase : Optional[int] = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
lowercase : int = {}
def fn_recursive_add_processors(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if hasattr(SCREAMING_SNAKE_CASE__ , '''set_processor''' ):
lowercase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return processors
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = len(self.attn_processors.keys() )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(SCREAMING_SNAKE_CASE__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if hasattr(SCREAMING_SNAKE_CASE__ , '''set_processor''' ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
module.set_processor(SCREAMING_SNAKE_CASE__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for name, module in self.named_children():
fn_recursive_attn_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , ):
lowercase : Optional[int] = hidden_states.shape[0]
lowercase : Tuple = timestep
if not torch.is_tensor(SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ) and len(timesteps.shape ) == 0:
lowercase : int = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase : Any = timesteps * torch.ones(SCREAMING_SNAKE_CASE__ , dtype=timesteps.dtype , device=timesteps.device )
lowercase : Union[str, Any] = self.time_proj(SCREAMING_SNAKE_CASE__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowercase : Any = timesteps_projected.to(dtype=self.dtype )
lowercase : List[str] = self.time_embedding(SCREAMING_SNAKE_CASE__ )
if self.embedding_proj_norm is not None:
lowercase : List[Any] = self.embedding_proj_norm(SCREAMING_SNAKE_CASE__ )
lowercase : int = self.embedding_proj(SCREAMING_SNAKE_CASE__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowercase : List[Any] = self.encoder_hidden_states_proj(SCREAMING_SNAKE_CASE__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
lowercase : Any = self.proj_in(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = self.positional_embedding.to(hidden_states.dtype )
lowercase : Dict = []
lowercase : Tuple = 0
if encoder_hidden_states is not None:
additional_embeds.append(SCREAMING_SNAKE_CASE__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowercase : Dict = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowercase : List[Any] = hidden_states[:, None, :]
lowercase : Optional[Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowercase : Tuple = self.prd_embedding.to(hidden_states.dtype ).expand(SCREAMING_SNAKE_CASE__ , -1 , -1 )
additional_embeds.append(SCREAMING_SNAKE_CASE__ )
lowercase : int = torch.cat(
SCREAMING_SNAKE_CASE__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowercase : str = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowercase : str = F.pad(
SCREAMING_SNAKE_CASE__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowercase : Tuple = hidden_states + positional_embeddings
if attention_mask is not None:
lowercase : List[str] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
lowercase : List[str] = F.pad(SCREAMING_SNAKE_CASE__ , (0, self.additional_embeddings) , value=0.0 )
lowercase : Dict = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowercase : Tuple = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowercase : Union[str, Any] = self.norm_in(SCREAMING_SNAKE_CASE__ )
for block in self.transformer_blocks:
lowercase : Optional[int] = block(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.norm_out(SCREAMING_SNAKE_CASE__ )
if self.prd_embedding is not None:
lowercase : List[str] = hidden_states[:, -1]
else:
lowercase : int = hidden_states[:, additional_embeddings_len:]
lowercase : Optional[Any] = self.proj_to_clip_embeddings(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 337 |
from typing import List
from .keymap import KEYMAP, get_character
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += [key]
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
def __lowercase ( *_UpperCamelCase ) ->Any:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += keys
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
class __SCREAMING_SNAKE_CASE ( A__ ):
def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ):
setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} )
setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] )
for key in handled_keys:
lowercase : List[Any] = value
return new_cls
@staticmethod
def __lowerCamelCase ( cls ):
lowercase : Dict = get_character()
if char != KEYMAP["undefined"]:
lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
lowercase : Tuple = char
return handler(cls )
else:
return None
def __lowercase ( cls ) ->Any:
"""simple docstring"""
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 337 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.