| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor

class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
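
        # Illustrative check of the formula above (added note): with the defaults
        # image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225 patches,
        # and mask_ratio=0.6 leaves a visible sequence of ceil(0.4 * 226) = 91
        # tokens, including [CLS].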

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)

def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import requests
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from a subreddit, optionally restricted to `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
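

# A minimal retry sketch for the HTTP 429 case handled above. This helper is an
# added illustration (its name, attempt count, and backoff schedule are
# assumptions, not part of the original script).
def get_subreddit_data_with_retry(subreddit: str, attempts: int = 3, **kwargs) -> dict:
    import time

    for attempt in range(attempts):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            time.sleep(2**attempt)  # back off 1s, 2s, 4s, ... before retrying
    raise requests.HTTPError("still rate limited after retries")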


if __name__ == "__main__":
    # If you get HTTP error 429, you are being rate limited; try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)

_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser

def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
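
# Routing note (added commentary; command names follow the accelerate CLI docs,
# and `tpu-config` is an assumed spelling for the TPU subcommand):
#   accelerate config      -> get_config_parser
#   accelerate env         -> env_command_parser
#   accelerate launch ...  -> launch_command_parser
#   accelerate tpu-config  -> tpu_command_parser
#   accelerate test        -> test_command_parser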
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
A_ : Optional[int] = nn.Parameter(torch.ones(1 , snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : str = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
A_ : Optional[int] = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = (embeds * self.std) + self.mean
return embeds | 286 | 0 |
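
# Round-trip sketch for the normalizer above (an added illustration, not part of
# the original module; the embedding_dim and batch size are arbitrary):
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     embeds = torch.randn(2, 768)
#     restored = normalizer.unscale(normalizer.scale(embeds))
#     assert torch.allclose(restored, embeds, atol=1e-5)
#
# scale() whitens embeddings with the stored mean/std and unscale() inverts it.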
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k: str) -> str:
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd: dict) -> None:
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str, config_json_path: str) -> None:
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _snake_case ( _snake_case : list[list[float]] ):
lowerCAmelCase : str = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_snake_case ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase : int = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase : Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase, lowerCAmelCase : List[Any] = matrix[1][1], matrix[0][0]
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_snake_case ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_snake_case ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase : int = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
lowerCAmelCase : Dict = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase : Dict = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase : Any = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase : Any = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase : Optional[int] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase : Optional[int] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase : Dict = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase : List[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase : str = array(_snake_case )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase : Optional[Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase : Tuple = array(_snake_case )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_snake_case )
# Calculate the inverse of the matrix
return [[float(d(_snake_case ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
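
# Hand-checked usage sketch (an added example, not part of the original module):
#
#     >>> inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]])
#     [[0.6, -0.7], [-0.2, 0.4]]
#
# since det = 4 * 6 - 2 * 7 = 10 and the 2x2 inverse is (1 / det) * [[d, -b], [-c, a]].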
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""simple docstring"""
def UpperCAmelCase ( a_ = 10 ):
'''simple docstring'''
if not isinstance(a_, a_ ) or n < 0:
raise ValueError('Invalid input' )
lowerCamelCase : Union[str, Any] = 10**n
lowerCamelCase : int = 2_8433 * (pow(2, 783_0457, a_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
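
# Added note on the three-argument pow() above: pow(2, 7830457, modulus) performs
# modular exponentiation, so every intermediate stays below `modulus`. Computing
# (2**7830457) % modulus instead would first materialise an integer of roughly
# 2.36 million decimal digits before reducing it.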
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_A = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_A = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
_A = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
_A = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
_A = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
_A = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len), one row per candidate continuation:
    [start_token] + story[:cap_length] + [delimiter_token] + continuation[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
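
# Shape sketch for the arrays built above (the batch count is illustrative):
# with n_batch=1000 stories and input_len=78, each dataset yields
#   input_ids, lm_labels : (1000, 2, 78)  - one row per candidate ending
#   mc_token_ids         : (1000, 2)      - position of each clf_token
#   mc_labels            : (1000,)        - index (0 or 1) of the true ending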


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
return model_prefix.startswith("""rag""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
A_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ = """dropout_rate"""
for p in extra_params:
if getattr(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ):
if not hasattr(UpperCAmelCase__, UpperCAmelCase__ ) and not hasattr(UpperCAmelCase__, equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(UpperCAmelCase__ ) )
delattr(UpperCAmelCase__, UpperCAmelCase__ )
continue
A_ = p if hasattr(UpperCAmelCase__, UpperCAmelCase__ ) else equivalent_param[p]
setattr(UpperCAmelCase__, UpperCAmelCase__, getattr(UpperCAmelCase__, UpperCAmelCase__ ) )
delattr(UpperCAmelCase__, UpperCAmelCase__ )
return hparams, config
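
# Hand-checked examples for the SQuAD-style metrics above (a sketch, not part
# of the original utilities):
#   normalize_answer("The  Cat!")                   -> "cat"
#   f1_score("a cat sat", "the cat sat")            -> 1.0   (articles are stripped)
#   calculate_exact_match(["a cat"], ["the cat"])   -> {"em": 1.0}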
| 162 | 1 |
def twos_complement(number: int) -> str:
    """
    Take a negative integer and return a string with its two's complement
    representation, prefixed with "0b".

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(-17)
    '0b101111'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version of one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
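
# A minimal sketch of what one pattern/replacement pair does (hand-run here,
# not part of the release flow):
#   re_pattern, replace = REPLACE_PATTERNS["init"]
#   re_pattern.sub(replace.replace("VERSION", "4.2.0"), '__version__ = "4.1.0"\n')
#   -> '__version__ = "4.2.0"\n'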
| 123 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A: Tuple = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
__lowerCAmelCase : str = AlbertTokenizer
__lowerCAmelCase : Optional[Any] = AlbertTokenizerFast
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : str = True
__lowerCAmelCase : Dict = True
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : List[str] = AlbertTokenizer(lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple = '''this is a test'''
UpperCAmelCase : Any = '''this is a test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[int] = '''<pad>'''
UpperCAmelCase : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(lowerCAmelCase_ ) , 30000 )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Tuple = self.get_tokenizer()
UpperCAmelCase : str = self.get_rust_tokenizer()
UpperCAmelCase : Tuple = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase : str = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase : Dict = self.get_rust_tokenizer()
UpperCAmelCase : Optional[int] = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase : str = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = AlbertTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [48, 25, 21, 1289] )
UpperCAmelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = AlbertTokenizer(lowerCAmelCase_ )
UpperCAmelCase : str = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : str = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 109 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM model."""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
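
# Minimal usage sketch (illustrative values, assumes a working transformers
# install; not part of the original file):
#   config = XLMConfig(n_layers=6, n_heads=8)
#   onnx_config = XLMOnnxConfig(config)
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'token_type_ids']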
| 121 | 0 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    If (number, number + 2) form a twin prime pair, return number + 2;
    otherwise return -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
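
# Quick examples (a sketch): (3, 5) are twin primes; 4 is not prime at all.
#   twin_prime(3)  -> 5
#   twin_prime(4)  -> -1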
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Combines the image and text embeddings
    into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
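
# Shape sanity check (a sketch; the toy dimensions below are illustrative
# assumptions, not from the original file):
#   model = UnCLIPTextProjModel(
#       clip_extra_context_tokens=4, clip_embeddings_dim=32,
#       time_embed_dim=64, cross_attention_dim=16,
#   )
#   hs, t_emb = model(
#       image_embeddings=torch.randn(2, 32),
#       prompt_embeds=torch.randn(2, 32),
#       text_encoder_hidden_states=torch.randn(2, 7, 32),
#       do_classifier_free_guidance=False,
#   )
#   hs.shape == (2, 4 + 7, 16) and t_emb.shape == (2, 64)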
| 88 | 1 |
import numpy as np
def runge_kutta(f, y0, x0, x_end, h):
    """
    Classic fourth-order Runge-Kutta: integrate y' = f(x, y) from x0 to x_end
    with step size h, starting at y(x0) = y0. Returns the array of y values.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
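
# Example (a sketch): integrating y' = y with y(0) = 1 over [0, 1] with
# h = 0.1 reproduces e to within ~2e-6:
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.1)
#   y[-1]  # ~2.7182797, vs. math.e = 2.718281828...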
if __name__ == "__main__":
import doctest
doctest.testmod()
| 156 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    """Image processor: optional resize, center crop, rescale and normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
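
# Usage sketch (the class name above is reconstructed; the path and shapes
# below are illustrative assumptions, not from the original file):
#   processor = MobileNetV2ImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224])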
| 156 | 1 |
def gray_code(bit_count: int) -> list:
    """Take in an integer n and return the n-bit Gray code sequence as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Output the n-bit Gray code sequence as a list of bit strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
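
# Example (a sketch): the 2-bit Gray code, where consecutive entries differ in
# exactly one bit.
#   gray_code(2)  -> [0, 1, 3, 2]   # i.e. 00, 01, 11, 10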
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
"""Convert LXMERT checkpoint."""

import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
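
# Example invocation (a sketch; the script name and all paths are hypothetical):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin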
| 236 | 0 |
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
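
# Typical downstream usage of these re-exports (a sketch; assumes they are
# exposed as accelerate.test_utils):
#   from accelerate.test_utils import require_cuda, slow
#
#   @require_cuda
#   def test_big_model():
#       ...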
| 293 |
def prime_sieve_eratosthenes(num):
    """
    Sieve of Eratosthenes: return every prime up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
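
# The sieve runs in O(n log log n) time and O(n) memory; starting the inner
# loop at p * p is safe because every smaller multiple of p was already marked
# by a smaller prime factor.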
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 187 | 0 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 355 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 0 |
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 73 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, is_folding_model=True,
            esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip('Does not support attention outputs')
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('ESMFold does not support passing input embeds!')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_integration(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_headmasking(self):
        pass

    @unittest.skip('ESMFold does not output hidden states in the normal way.')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('ESMfold does not output hidden states in the normal way.')
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip('ESMFold only has one output format.')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip('ESMFold does not support input chunking.')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['positions']
        # the model is cast to float32 above, so compare in float32
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4)) | 297 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
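# NOTE: `Aer` and `execute` belong to the legacy Qiskit (< 1.0) API; on newer
# releases they live in the separate `qiskit-aer` package and `backend.run`.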
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build a quantum Fourier transform circuit on the given number of qubits and return simulated counts."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
| 366 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sorts the first ``n`` elements of ``collection`` in place."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Inserts ``collection[index - 1]`` into its sorted position among the first ``index + 1`` elements."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 100 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 55 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, pad_token_id=1,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("Protein models do not support embedding resizing." )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip("Protein models do not support embedding resizing." )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(UpperCamelCase , UpperCamelCase )
for k, v in name.items():
assert isinstance(UpperCamelCase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 55 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
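# NOTE: the compressed-file fixtures used below (bz2_file, gz_file, lz4_file, seven_zip_file,
# tar_file, xz_file, zip_file, zstd_file, text_file) are assumed to be provided by the test
# suite's conftest; they are not defined in this module.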
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_extract_insecure_tarfile(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 165 | from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the denoising loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 165 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 279 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 279 | 1 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        # carry holds the common set bits of first and second
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 25 |
'''simple docstring'''
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if ``number`` has no divisor between 2 and its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` that can be written as a difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 25 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> str:
lowerCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
lowerCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Optional[int] = model(_UpperCAmelCase )[0]
lowerCamelCase : Tuple = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase )
lowerCamelCase : Any = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
| 48 |
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCAmelCase ( number ):
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
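    # Hedged sanity checks: this obfuscated helper plays the role of a twin-prime
    # lookup, returning number + 2 when both number and number + 2 are prime.
    assert __lowerCAmelCase(5 ) == 7
    assert __lowerCAmelCase(4 ) == -1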
| 298 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
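    # Replacing the module in sys.modules with a _LazyModule defers the heavy
    # torch imports above until an attribute is first accessed.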
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358 |
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits , classical_bits ):
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
# Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
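    # With X applied to both qubits, an ideal (noise-free) simulation should put
    # all 1000 shots in state '11', i.e. counts == {'11': 1000}.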
print(f'''Total count for various states are: {counts}''') | 270 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = ["pixel_values"]
def __init__( self : str , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 255 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , **snake_case_ : Union[str, Any] , ):
super().__init__(**snake_case_ )
snake_case__ : Dict = size if size is not None else {"""shortest_edge""": 224}
snake_case__ : Optional[int] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
snake_case__ : List[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
snake_case__ : Dict = get_size_dict(snake_case_ , param_name="""crop_size""" )
snake_case__ : int = do_resize
snake_case__ : int = size
snake_case__ : Dict = do_center_crop
snake_case__ : List[str] = crop_size
snake_case__ : List[str] = resample
snake_case__ : Dict = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Union[str, Any] = do_normalize
snake_case__ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase ( self : Tuple , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[str] , ):
snake_case__ : Optional[int] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" in size:
snake_case__ : int = get_resize_output_image_size(snake_case_ , size["""shortest_edge"""] , default_to_square=snake_case_ )
elif "height" in size and "width" in size:
snake_case__ : str = (size["""height"""], size["""width"""])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase ( self : str , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[str] , ):
snake_case__ : Optional[Any] = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(snake_case_ , size=(size["""height"""], size["""width"""]) , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : str , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[str] , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case__ : Union[str, Any] = to_numpy_array(snake_case_ )
if do_resize:
snake_case__ : Any = self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ )
if do_center_crop:
snake_case__ : Dict = self.center_crop(snake_case_ , size=snake_case_ )
if do_rescale:
snake_case__ : Optional[Any] = self.rescale(image=snake_case_ , scale=snake_case_ )
if do_normalize:
snake_case__ : Any = self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ )
snake_case__ : List[Any] = to_channel_dimension_format(snake_case_ , snake_case_ )
return image
def lowerCamelCase ( self : List[str] , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Optional[int] , ):
snake_case__ : List[str] = do_resize if do_resize is not None else self.do_resize
snake_case__ : str = resample if resample is not None else self.resample
snake_case__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : str = image_mean if image_mean is not None else self.image_mean
snake_case__ : Union[str, Any] = image_std if image_std is not None else self.image_std
snake_case__ : List[str] = size if size is not None else self.size
snake_case__ : str = get_size_dict(snake_case_ , default_to_square=snake_case_ )
snake_case__ : List[str] = crop_size if crop_size is not None else self.crop_size
snake_case__ : str = get_size_dict(snake_case_ , param_name="""crop_size""" )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
snake_case__ : Tuple = make_batched(snake_case_ )
snake_case__ : int = [
[
self._preprocess_image(
image=snake_case_ , do_resize=snake_case_ , size=snake_case_ , resample=snake_case_ , do_center_crop=snake_case_ , crop_size=snake_case_ , do_rescale=snake_case_ , rescale_factor=snake_case_ , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , data_format=snake_case_ , )
for img in video
]
for video in videos
]
snake_case__ : Union[str, Any] = {"""pixel_values""": videos}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
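# Hedged usage sketch (class and method names above are obfuscated stand-ins for
# the original video image processor, so this is indicative only):
#   processor = <VideoImageProcessor>(size={"shortest_edge": 224})
#   video = [np.zeros((3, 256, 256), dtype=np.uint8)] * 8   # one 8-frame video
#   batch = processor.preprocess(video, return_tensors="np")
#   batch["pixel_values"].shape  ->  (1, 8, 3, 224, 224)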
| 35 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = CustomTokenizer
pass
| 35 | 1 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 ,4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 ,5 )
    def forward( self ,_snake_case ):
        return self.linear2(self.batchnorm(self.linear1(_snake_case ) ) )
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_snake_case ,model.state_dict() )
UpperCAmelCase_ : Optional[int] = os.path.join(_snake_case ,"index.json" )
self.assertTrue(os.path.isfile(_snake_case ) )
# TODO: add tests on what is inside the index
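            # (Sketch only, not asserted: each index.json entry is expected to map a
            # key to its dtype and shape, e.g.
            # index["linear1.weight"] ~ {"dtype": "float32", "shape": [4, 3]}.)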
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCAmelCase_ : str = os.path.join(_snake_case ,f'''{key}.dat''' )
self.assertTrue(os.path.isfile(_snake_case ) )
# TODO: add tests on the fact weights are properly loaded
def UpperCamelCase__ ( self ):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 ,3 ,dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight ,"weight" ,tmp_dir ,{} )
                weight_file = os.path.join(tmp_dir ,"weight.dat" )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index ,{"weight": {"shape": [2, 3], "dtype": str(dtype ).split("." )[1]}} )
                new_weight = load_offloaded_weight(weight_file ,index["weight"] )
                self.assertTrue(torch.equal(weight ,new_weight ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = ModelForTest()
UpperCAmelCase_ : Optional[int] = model.state_dict()
UpperCAmelCase_ : int = {k: v for k, v in state_dict.items() if "linear2" not in k}
UpperCAmelCase_ : List[Any] = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = OffloadedWeightsLoader(state_dict=_snake_case ,save_folder=_snake_case )
# Every key is there with the right value
self.assertEqual(sorted(_snake_case ) ,sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_snake_case ,weight_map[key] ) )
UpperCAmelCase_ : Tuple = {k: v for k, v in state_dict.items() if "weight" in k}
UpperCAmelCase_ : Optional[int] = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = OffloadedWeightsLoader(state_dict=_snake_case ,save_folder=_snake_case )
# Every key is there with the right value
self.assertEqual(sorted(_snake_case ) ,sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_snake_case ,weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_snake_case ,_snake_case )
# Duplicates are removed
UpperCAmelCase_ : Any = OffloadedWeightsLoader(state_dict=_snake_case ,save_folder=_snake_case )
# Every key is there with the right value
self.assertEqual(sorted(_snake_case ) ,sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_snake_case ,weight_map[key] ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = {"a.1": 0, "a.10": 1, "a.2": 2}
UpperCAmelCase_ : List[str] = extract_submodules_state_dict(_snake_case ,["a.1", "a.2"] )
self.assertDictEqual(_snake_case ,{"a.1": 0, "a.2": 2} )
UpperCAmelCase_ : List[Any] = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
UpperCAmelCase_ : str = extract_submodules_state_dict(_snake_case ,["a.1", "a.2"] )
self.assertDictEqual(_snake_case ,{"a.1.a": 0, "a.2.a": 2} )
| 67 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCamelCase = logging.get_logger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ):
UpperCAmelCase_ : List[Any] = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Any = padding_value
UpperCAmelCase_ : Any = kwargs.pop("padding_side" ,"right" )
UpperCAmelCase_ : int = kwargs.pop("return_attention_mask" ,_snake_case )
super().__init__(**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = True ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = None ,):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(_snake_case ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : Dict = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_snake_case ) == 0:
if return_attention_mask:
UpperCAmelCase_ : List[str] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : Tuple = required_input[0]
if isinstance(_snake_case ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
UpperCAmelCase_ : int = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_snake_case ):
UpperCAmelCase_ : str = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_snake_case ):
UpperCAmelCase_ : Any = "tf"
elif is_torch_tensor(_snake_case ):
UpperCAmelCase_ : Optional[int] = "pt"
elif isinstance(_snake_case ,(int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : Any = "np"
else:
raise ValueError(
f'''type of {first_element} unknown: {type(_snake_case )}. '''
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
UpperCAmelCase_ : Optional[Any] = to_numpy(_snake_case )
else:
UpperCAmelCase_ : Any = [to_numpy(_snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : List[Any] = self._get_padding_strategies(padding=_snake_case ,max_length=_snake_case )
UpperCAmelCase_ : Dict = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : str = len(_snake_case )
if not all(len(_snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : Dict = []
for i in range(_snake_case ):
UpperCAmelCase_ : List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : Dict = self._truncate(
_snake_case ,max_length=_snake_case ,pad_to_multiple_of=_snake_case ,truncation=_snake_case ,)
truncated_inputs.append(_snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : List[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : str = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : Dict = {}
for i in range(_snake_case ):
# padding
UpperCAmelCase_ : Dict = self._pad(
truncated_inputs[i] ,max_length=_snake_case ,padding_strategy=_snake_case ,pad_to_multiple_of=_snake_case ,return_attention_mask=_snake_case ,)
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Optional[Any] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
batch_outputs[key].append(_snake_case )
return BatchFeature(_snake_case ,tensor_type=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = PaddingStrategy.DO_NOT_PAD ,_snake_case = None ,_snake_case = None ,):
UpperCAmelCase_ : Any = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Any = len(_snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCAmelCase_ : List[str] = np.ones(len(_snake_case ) ,dtype=np.intaa )
if needs_to_be_padded:
UpperCAmelCase_ : Union[str, Any] = max_length - len(_snake_case )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : str = np.pad(
processed_features["attention_mask"] ,(0, difference) )
UpperCAmelCase_ : str = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : int = np.pad(
_snake_case ,_snake_case ,"constant" ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] ,(difference, 0) )
UpperCAmelCase_ : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : Union[str, Any] = np.pad(
_snake_case ,_snake_case ,"constant" ,constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : List[Any] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Dict = len(_snake_case ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : Any = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : str = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self ,_snake_case=False ,_snake_case=None ):
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : str = PaddingStrategy(_snake_case )
elif isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : List[Any] = padding
else:
UpperCAmelCase_ : List[str] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
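# Hedged padding sketch (names above are obfuscated; "input_values" is assumed to
# be the model's main input name):
#   fe = <SequenceFeatureExtractor>(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding="longest")
#   -> the shorter sequence is right-padded with 0.0 to length 3 and, when
#      requested, an attention_mask of ones/zeros marks real vs padded steps.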
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort( my_list : list ) -> list:
    """simple docstring"""
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets : list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
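    # Buckets are indexed by integer offset from the minimum value, so floats that
    # share an offset land in the same bucket and are ordered by the final sort:
    assert bucket_sort([0.4, 0.1, 2.5, 0.3]) == [0.1, 0.3, 0.4, 2.5]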
| 238 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation( img ,pt1 ,pt2 ,rows ,cols ):
    matrix = cva.getAffineTransform(pt1 ,pt2)
    return cva.warpAffine(img ,matrix ,(rows, cols))
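# cva.getAffineTransform derives the 2x3 affine matrix from exactly three source
# points and three destination points, which is why each point array below has
# shape (3, 2).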
if __name__ == "__main__":
# read original image
    image = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
    img_rows , img_cols = gray_img.shape
    # set different points to rotate image (the pairings below are assumed from
    # the upstream example: pts1 -> pts2, pts2 -> pts3, pts3 -> pts4)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 149 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = StableDiffusionInpaintPipeline
__snake_case = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__snake_case = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__snake_case = frozenset([] )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , )
A__ : Dict =PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
A__ : List[Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
A__ : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
A__ : List[Any] =CLIPTextModel(__snake_case )
A__ : List[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Optional[Any] ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int=0 ) -> List[Any]:
'''simple docstring'''
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
A__ : Any =image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ : Dict =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((64, 64) )
A__ : Dict =Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(__snake_case ).startswith("""mps""" ):
A__ : Dict =torch.manual_seed(__snake_case )
else:
A__ : Dict =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
A__ : List[Any] ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Optional[int] =self.get_dummy_components()
A__ : List[Any] =StableDiffusionInpaintPipeline(**__snake_case )
A__ : List[str] =sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
A__ : Union[str, Any] =self.get_dummy_inputs(__snake_case )
A__ : List[str] =sd_pipe(**__snake_case ).images
A__ : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Optional[int] =np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : List[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
A__ : str ="""stabilityai/stable-diffusion-2-inpainting"""
A__ : Any =StableDiffusionInpaintPipeline.from_pretrained(__snake_case , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
A__ : Dict ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Tuple =torch.manual_seed(0 )
A__ : Tuple =pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , generator=__snake_case , output_type="""np""" , )
A__ : Optional[Any] =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
A__ : List[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : List[str] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[Any] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
A__ : Optional[int] ="""stabilityai/stable-diffusion-2-inpainting"""
A__ : Tuple =StableDiffusionInpaintPipeline.from_pretrained(
__snake_case , torch_dtype=torch.floataa , safety_checker=__snake_case , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =torch.manual_seed(0 )
A__ : Dict =pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , generator=__snake_case , output_type="""np""" , )
A__ : Optional[Any] =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ : Tuple =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[int] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Dict ="""stabilityai/stable-diffusion-2-inpainting"""
A__ : Any =PNDMScheduler.from_pretrained(__snake_case , subfolder="""scheduler""" )
A__ : int =StableDiffusionInpaintPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , scheduler=__snake_case , torch_dtype=torch.floataa , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ : Any ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Tuple =torch.manual_seed(0 )
A__ : str =pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type="""np""" , )
A__ : str =torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 371 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'FlavaImageProcessor'
__snake_case = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : Any =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCAmelCase_ , )
A__ : Optional[Any] =kwargs.pop("""feature_extractor""" )
A__ : List[Any] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : int =self.image_processor
def __call__( self : Union[str, Any] , lowerCAmelCase_ : Optional[ImageInput] = None , lowerCAmelCase_ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
A__ : int =self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if images is not None:
A__ : List[str] =self.image_processor(
lowerCAmelCase_ , return_image_mask=lowerCAmelCase_ , return_codebook_pixels=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )
def lowercase__ ( self : Any , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[int] ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : List[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Any ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : Any =self.tokenizer.model_input_names
A__ : Optional[Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase_ , )
return self.image_processor_class
@property
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase_ , )
return self.image_processor
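# Hedged usage sketch (the class above is an obfuscated FLAVA-style processor):
#   processor = <FlavaProcessor>(image_processor=..., tokenizer=...)
#   enc = processor(images=image, text="a photo of a cat", return_tensors="pt")
# A single call returns both pixel_values and input_ids in one BatchEncoding.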
| 136 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_A = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""BeitFeatureExtractor"""]
_A = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 242 |
"""simple docstring"""
def combination_sum_iv( n , array , target ) -> int:
    def count_of_possible_combinations( target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n , array , target ) -> int:
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n , array , target ) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
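    # All three variants count *ordered* sequences, so for array = [1, 2, 5] and
    # target = 5 they should each return 9 (1+2+2 and 2+2+1 count separately).
    assert combination_sum_iv_bottom_up(n, array, target) == 9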
| 242 | 1 |
'''simple docstring'''
def __lowerCamelCase ( density , bulk_modulus ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
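    # Hedged example with assumed textbook values: for water, density ~ 998 kg/m^3
    # and bulk modulus ~ 2.15e9 Pa give a speed of sound of roughly 1470 m/s.
    print(__lowerCamelCase(998, 2.15e9))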
| 249 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XGLMConfig
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = """gelu"""
def __init__( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=1_4 , UpperCamelCase__ : int=7 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=9_9 , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Union[str, Any]=5_1_2 , UpperCamelCase__ : Optional[Any]=0.0_2 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = ffn_dim
UpperCamelCase = activation_function
UpperCamelCase = activation_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = None
UpperCamelCase = 0
UpperCamelCase = 2
UpperCamelCase = 1
def A ( self : Union[str, Any] ):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def A ( self : List[Any] ):
"""simple docstring"""
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def A ( self : Union[str, Any] ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCamelCase__ , )
def A ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (TFXGLMForCausalLM,) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFXGLMModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=3_7 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def A ( self : List[str] ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFXGLMModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def A ( self : Dict ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Optional[int] , UpperCamelCase__ : Tuple=True ):
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
UpperCamelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
UpperCamelCase = tokenizer('Today is a nice day and' , return_tensors='tf' )
UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
UpperCamelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , seed=[7, 0] )
UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = 'left'
# use different length sentences to test batching
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
UpperCamelCase = tokenizer(UpperCamelCase__ , return_tensors='tf' , padding=UpperCamelCase__ )
UpperCamelCase = inputs['input_ids']
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=1_2 )
UpperCamelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2 )
UpperCamelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2 )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [non_padded_sentence, padded_sentence] )
| 249 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_SCREAMING_SNAKE_CASE = ["""text""", """image""", """audio"""]
def create_inputs( input_types ):
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def output_types( outputs ):
    output_types = []
for output in outputs:
if isinstance(__a , (str, AgentText) ):
output_types.append('text' )
elif isinstance(__a , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(__a , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class SCREAMING_SNAKE_CASE_ :
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
snake_case_ : Any = self.tool.inputs
for _input in inputs:
if isinstance(_input , _A ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case_ : Optional[int] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
"""simple docstring"""
snake_case_ : Optional[Any] = create_inputs(self.tool.inputs )
snake_case_ : int = self.tool(*_A )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case_ : Tuple = [outputs]
self.assertListEqual(output_types(_A ) , self.tool.outputs )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Any = create_inputs(self.tool.inputs )
snake_case_ : Tuple = self.tool(*_A )
if not isinstance(_A , _A ):
snake_case_ : List[str] = [outputs]
self.assertEqual(len(_A ) , len(self.tool.outputs ) )
for output, output_type in zip(_A , self.tool.outputs ):
snake_case_ : List[Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_A , _A ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = create_inputs(self.tool.inputs )
snake_case_ : Any = []
for _input, input_type in zip(_A , self.tool.inputs ):
if isinstance(_A , _A ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case_ : List[Any] = self.tool(*_A )
if not isinstance(_A , _A ):
snake_case_ : Union[str, Any] = [outputs]
self.assertEqual(len(_A ) , len(self.tool.outputs ) )
| 327 |
def SCREAMING_SNAKE_CASE__ ( density , bulk_modulus ):
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
lowercase__ :Tuple = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
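

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): build a small config
    # and check that `hidden_size` resolves to `n_embd` through `attribute_map`.
    config = TrajectoryTransformerConfig(n_embd=64, n_layer=2, n_head=2)
    print(config.hidden_size)  # 64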
| 364 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
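        # Comment added for clarity: this is the standard KV-cache consistency
        # check. Running the full extended sequence without a cache and running
        # only the new token with `past_key_values` must agree on the last
        # hidden state up to numerical tolerance.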
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 97 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
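
# Hedged usage sketch (checkpoint name is illustrative, not from this module):
#
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#
# Custom pairs can be attached to the auto machinery with
# AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor).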
| 94 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
| 231 | 0 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return [sentence[i : i + ngram_size] for i in range(len(__UpperCAmelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
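
    # Example (the function name is our reconstruction of the obfuscated original):
    print(create_ngram("I am a sentence", 2))  # ['I ', ' a', 'am', 'm ', ...]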
 | 352 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 203 | 0 |
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head that maps transformer hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
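

if __name__ == "__main__":
    # Minimal smoke test (assumed shapes, not part of the original file): map a
    # batch of 768-dim hidden states to 5 class logits.
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    print(head(torch.randn(2, 768)).shape)  # torch.Size([2, 5])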
| 215 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
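
# For orientation (illustrative numbers, not from the original script): with
# hidden_size=1024 the fused qkv weight has shape (3072, 1024); rows 0-1023
# feed the query projection, rows 1024-2047 the key, and rows 2048-3071 the value.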
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 215 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 215 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=False
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5) | 215 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
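
# Illustrative (assumed) example: a module containing the line
# "from .pipeline_utils import DiffusionPipeline" yields ["pipeline_utils"].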
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
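

# Hedged usage sketch (the pipeline name is illustrative, not from this file):
#
#     pipeline_cls = get_class_from_dynamic_module(
#         "clip_guided_stable_diffusion", "clip_guided_stable_diffusion.py"
#     )
#
# With class_name=None the loader falls back to find_pipeline_class, which picks
# the single class in the downloaded module that subclasses DiffusionPipeline.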
| 59 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
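    # Illustrative sanity checks (added for clarity; not in the original script):
    assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10  # pick every positive element
    assert max_subsequence_sum([-2, -3, -1]) == -1  # all negative: best single element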
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 357 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
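# New Code #
# A minimal, self-contained sketch of the decorator showcased below (added for
# clarity; the inner names are illustrative, not from the original script):
def _sketch_find_executable_batch_size():
    @find_executable_batch_size(starting_batch_size=128)
    def run(batch_size):
        # Everything that allocates GPU memory must live inside the decorated
        # function: on a CUDA out-of-memory error the wrapper frees what it can
        # and retries with batch_size // 2 until the call succeeds.
        return batch_size

    return run()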
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 287 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
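# Example invocation (paths are placeholders; the flags come from add_generic_args
# and add_model_specific_args above):
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_predict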
| 169 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
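# New Code #
# A minimal, self-contained sketch of the tracking API showcased below (added for
# clarity; the project name and logged values are illustrative, not from the original):
def _sketch_tracking_api():
    accelerator = Accelerator(log_with="all", project_dir="logs")
    accelerator.init_trackers("sketch_project", config={"lr": 2e-5})
    accelerator.log({"train_loss": 0.5}, step=0)
    accelerator.end_training()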
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
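# Launch sketch (the script filename is a placeholder; at least one tracker such as
# tensorboard must be installed for logs to actually be written):
#   accelerate launch tracking_example.py --with_tracking --project_dir ./logs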
| 169 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
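# Note (added for clarity): with the lazy structure above, `from transformers.models
# .pegasus_x import PegasusXModel` only triggers the torch-backed import on first
# attribute access, keeping `import transformers` cheap.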
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<unk>""")
        self.assertEqual(vocab_keys[1], """<s>""")
        self.assertEqual(vocab_keys[-1], """<eod>""")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase, model_name="""xlnet-base-cased""", revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""")
| 336 | 0 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """simple docstring"""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
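# Example invocation (paths and the script filename are placeholders):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-pipeline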
| 47 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type='sine',
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
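# Usage sketch for the config above (illustrative; mirrors the typical
# PretrainedConfig round-trip rather than any DETA-specific behavior):
#   configuration = DetaConfig()
#   assert configuration.to_dict()['model_type'] == 'deta'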
| 47 | 1 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
    main()
| 364 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
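# Note (added for clarity): when scipy is missing, the wildcard import in the except
# branch above pulls in dummy objects, so `LMSDiscreteScheduler` raises a clear
# "requires scipy" error at use time instead of breaking the package import.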
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306 | 0 |
'''simple docstring'''
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """simple docstring"""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    """simple docstring"""
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'


def test_broadcast(state):
    """simple docstring"""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    """simple docstring"""
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'


def test_reduce_mean(state):
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'


def _mp_fn(index):
    """simple docstring"""
    main()


def main():
    """simple docstring"""
    state = PartialState()
    state.print(f'State: {state}')
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
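# This script is designed to be launched across processes, e.g. (illustrative):
#   accelerate launch --num_processes 2 test_ops.py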
| 53 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Compare current_numerator / current_denominator against the best fraction
        # so far via cross-multiplication (avoids floating point).
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
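    # Known small case from the Project Euler statement: with limit 8 the closest
    # fraction left of 3/7 is 2/5, so the numerator returned is 2.
    assert solution(numerator=3, denominator=7, limit=8) == 2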
| 246 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    '''simple docstring'''
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
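# Example usage of the context manager above (illustrative; the body is arbitrary):
#   with hide():
#       run_interactive_menu()  # cursor is hidden here and restored afterwards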
| 159 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the contiguous subarray with the largest sum via divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Find the best subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    """Time one max_subarray call on a random array of the given size."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Benchmark a range of input sizes and plot the runtimes."""
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
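# Quick sanity check with the classic CLRS example array: the maximum subarray
# spans indices 7..10 with sum 43, i.e.
# max_subarray([13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7], 0, 15)
# returns (7, 10, 43).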
| 159 | 1 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the worldwide case, death and recovery counters from the page."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 46 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 105 | 0 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the sum of the cubes and the sum of the squares of
    the first n natural numbers."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"{solution() = }")
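# Sanity check: for n = 10 the sum of the cubes is 55**2 = 3025 and the sum of
# the squares is 385, so solution(10) returns 2640.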
| 337 |
'''simple docstring'''
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
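# Checksum illustration (not a real person's ID): for "12345678Z",
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so the letter is accepted.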
| 337 | 1 |
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q", "Q": "D",
    "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G", "H": "U", "U": "H",
    "I": "V", "V": "I", "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check that the rotor selection, rotor positions and plugboard are usable."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Build the symmetric plugboard mapping from a string of letter pairs."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt (or, symmetrically, decrypt) text with the given machine settings."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 54 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 54 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
'''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 233 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: after each pass the largest remaining element is
    in place, so the next pass only needs to consider length - 1 items."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
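# Example: bubble_sort([0, 5, 2, 3, 2]) returns [0, 2, 2, 3, 5].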
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        sa = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_sa = tokenizer(sa)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)

        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
| 24 |
def solution() -> int:
    """
    Returns the product a*b*c of the unique Pythagorean triplet (a < b < c)
    for which a + b + c == 1000 (Project Euler 9).
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
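# For reference, the special triplet is (200, 375, 425), so the printed product is 31875000.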
| 24 | 1 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
__snake_case = parser.parse_args()
get_runner_status(args.target_runners, args.token)
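# Invocation sketch (the script filename is hypothetical):
#   python check_offline_runners.py --target_runners runner-1,runner-2 --token "$GITHUB_TOKEN"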
| 78 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it (False when empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Append an element at the rear; raises when the queue is full."""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raises on underflow."""
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
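# Usage sketch (illustrative): enqueue returns self, so calls can be chained.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2)
    assert len(queue) == 2
    assert queue.dequeue() == 1  # FIFO order
    assert queue.first() == 2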
| 78 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
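# Shape summary for reference: for a protein with num_res residues, the fields
# added above are "residx_atom14_to_atom37" (num_res, 14),
# "residx_atom37_to_atom14" (num_res, 37), "atom14_atom_exists" (num_res, 14)
# and "atom37_atom_exists" (num_res, 37).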
| 59 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
_A = vocab_size
_A = max_position_embeddings
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = rotary_pct
_A = rotary_emb_base
_A = attention_dropout
_A = hidden_dropout
_A = classifier_dropout
_A = initializer_range
_A = layer_norm_eps
_A = use_cache
_A = tie_word_embeddings
_A = use_parallel_residual
_A = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
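# Minimal usage sketch (hypothetical values) for the validation above: a valid
# `rope_scaling` dict carries exactly a `type` in {"linear", "dynamic"} and a
# float `factor` > 1.
#
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#   # GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})  -> ValueError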
| 79 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
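# Minimal usage sketch (assumes the public "google/owlvit-base-patch32" checkpoint,
# which maps to this pipeline class):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   preds = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#       threshold=0.1,
#   )
#   # preds is a list of {"score", "label", "box"} dicts sorted by descending score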
| 366 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self):
'''simple docstring'''
return str(self)
    def validate_indices(self, loc: tuple) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple, value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)
    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
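    # `self` is expected to already hold A^(-1); the method above implements the
    # Sherman-Morrison identity
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # refreshing the inverse after a rank-1 update in O(n^2) instead of redoing a
    # full O(n^3) inversion.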
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 300 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
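# Worked example (hypothetical sizes): resizing a 480x640 image toward 384x384 with
# keep_aspect_ratio=True and multiple=32 fits the height (scale 0.8 is closer to 1
# than the width scale 0.6), yielding (384, 512):
#
#   import numpy as np
#   get_resize_output_image_size(np.zeros((3, 480, 640)), 384, True, 32)  # -> (384, 512)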
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = False , lowercase = 1 , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[int] = size if size is not None else {'height': 384, 'width': 384}
_lowerCamelCase : Union[str, Any] = get_size_dict(lowercase )
_lowerCamelCase : Optional[Any] = do_resize
_lowerCamelCase : List[Any] = size
_lowerCamelCase : Tuple = keep_aspect_ratio
_lowerCamelCase : List[Any] = ensure_multiple_of
_lowerCamelCase : str = resample
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : int = do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self , lowercase , lowercase , lowercase = False , lowercase = 1 , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
_lowerCamelCase : str = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_lowerCamelCase : Optional[Any] = get_resize_output_image_size(
lowercase , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase , multiple=lowercase , )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[str] = size if size is not None else self.size
_lowerCamelCase : Tuple = get_size_dict(lowercase )
_lowerCamelCase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowerCamelCase : int = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowerCamelCase : int = resample if resample is not None else self.resample
_lowerCamelCase : int = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
_lowerCamelCase : List[Any] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Tuple = [to_numpy_array(lowercase ) for image in images]
if do_resize:
_lowerCamelCase : List[Any] = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : List[Any] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
_lowerCamelCase : List[str] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
_lowerCamelCase : Optional[Any] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Dict = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase ):
_lowerCamelCase : Union[str, Any] = target_sizes.numpy()
_lowerCamelCase : Union[str, Any] = []
for idx in range(len(lowercase ) ):
_lowerCamelCase : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase )
_lowerCamelCase : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
_lowerCamelCase : Dict = logits.argmax(dim=1 )
_lowerCamelCase : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 96 |
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
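# Example (hedged sketch): hex_to_bin("AC") returns the int 10101100, whose decimal
# digits spell the binary expansion of 0xAC == 172; hex_to_bin("-1") returns -1.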
if __name__ == "__main__":
import doctest
doctest.testmod()
| 228 | 0 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
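# Shape sketch (hypothetical tensor): a (3, 64, 64) channels-first image with 16x16
# patches becomes a 4x4 grid of flattened 16*16*3 patches:
#
#   x = torch.randn(3, 64, 64)
#   torch_extract_patches(x, 16, 16).shape  # torch.Size([1, 4, 4, 768])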
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs):
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True,
        ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image, data_format=None, **kwargs):
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
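# Minimal usage sketch (non-VQA path, synthetic input, no checkpoint required):
#
#   import numpy as np
#   processor = Pix2StructImageProcessor(max_patches=1024)
#   img = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)
#   batch = processor(images=img, return_tensors="np")
#   batch["flattened_patches"].shape  # (1, 1024, 770): 2 id columns + 16*16*3 pixels
#   batch["attention_mask"].shape     # (1, 1024): 1.0 for real patches, 0.0 for padding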
| 160 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
] , )
| 160 | 1 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( __UpperCAmelCase ):
def a ( self : List[str] ):
__UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , '''num_heads''' ) )
class _UpperCAmelCase :
def __init__( self : Optional[int] , _lowercase : int , _lowercase : List[str]=13 , _lowercase : Tuple=64 , _lowercase : str=3 , _lowercase : int=[16, 48, 96] , _lowercase : List[Any]=[1, 3, 6] , _lowercase : Tuple=[1, 2, 10] , _lowercase : str=[7, 3, 3] , _lowercase : List[Any]=[4, 2, 2] , _lowercase : Any=[2, 1, 1] , _lowercase : Tuple=[2, 2, 2] , _lowercase : int=[False, False, True] , _lowercase : Union[str, Any]=[0.0, 0.0, 0.0] , _lowercase : Union[str, Any]=0.02 , _lowercase : Union[str, Any]=1E-12 , _lowercase : Optional[int]=True , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_sizes
__UpperCAmelCase = patch_stride
__UpperCAmelCase = patch_padding
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = num_heads
__UpperCAmelCase = stride_kv
__UpperCAmelCase = depth
__UpperCAmelCase = cls_token
__UpperCAmelCase = attention_drop_rate
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
def a ( self : Optional[int] ):
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a ( self : Union[str, Any] ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : str ):
__UpperCAmelCase = CvtModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase = model(UpperCamelCase__ )
__UpperCAmelCase = (self.image_size, self.image_size)
__UpperCAmelCase , __UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def a ( self : Union[str, Any] , _lowercase : Dict , _lowercase : Dict , _lowercase : Optional[int] ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = CvtForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : Dict ):
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
a__ : List[str] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
a__ : Optional[Any] = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
a__ : Union[str, Any] = False
a__ : Tuple = False
a__ : Optional[int] = False
a__ : List[str] = False
a__ : Dict = False
def a ( self : Any ):
__UpperCAmelCase = CvtModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def a ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a ( self : Optional[int] ):
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def a ( self : Union[str, Any] ):
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def a ( self : Optional[int] ):
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def a ( self : str ):
pass
def a ( self : List[str] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(UpperCamelCase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def a ( self : int ):
def check_hidden_states_output(_lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Optional[int] ):
__UpperCAmelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def a ( self : List[str] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a ( self : Any ):
pass
@slow
def a ( self : str ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = CvtModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowercase__ ( ):
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def a ( self : str ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def a ( self : Optional[Any] ):
__UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**UpperCamelCase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
__UpperCAmelCase = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 332 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
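# On the all-zeros input state the QFT yields a uniform superposition, so the
# 10000 shots should split roughly evenly across all 2**n basis states
# (about 1250 counts per state for the default n = 3).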
| 278 | 0 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
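# Quick numeric check: sigmoid_linear_unit is the sigmoid approximation of GELU,
# x * sigmoid(1.702 * x); for x = 1.0 it gives ~0.8458 versus ~0.8413 for the
# exact GELU 0.5 * x * (1 + erf(x / sqrt(2))):
#
#   sigmoid_linear_unit(np.array([1.0]))  # array([0.8458...])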
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=[10, 20, 30, 40], SCREAMING_SNAKE_CASE_=[2, 2, 3, 2], SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=["stage2", "stage3", "stage4"], SCREAMING_SNAKE_CASE_=[2, 3, 4], SCREAMING_SNAKE_CASE_=None, ) -> int:
UpperCAmelCase_: Dict = parent
UpperCAmelCase_: Any = batch_size
UpperCAmelCase_: Union[str, Any] = image_size
UpperCAmelCase_: List[Any] = num_channels
UpperCAmelCase_: Union[str, Any] = num_stages
UpperCAmelCase_: Tuple = hidden_sizes
UpperCAmelCase_: int = depths
UpperCAmelCase_: Union[str, Any] = is_training
UpperCAmelCase_: List[Any] = use_labels
UpperCAmelCase_: Optional[Any] = intermediate_size
UpperCAmelCase_: Optional[int] = hidden_act
UpperCAmelCase_: List[Any] = num_labels
UpperCAmelCase_: Union[str, Any] = initializer_range
UpperCAmelCase_: Dict = out_features
UpperCAmelCase_: List[str] = out_indices
UpperCAmelCase_: str = scope
def __snake_case (self ) -> str:
UpperCAmelCase_: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_: List[Any] = None
if self.use_labels:
UpperCAmelCase_: List[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase_: int = self.get_config()
return config, pixel_values, labels
def __snake_case (self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCAmelCase_: Tuple = ConvNextModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[str] = model(SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCAmelCase_: List[Any] = ConvNextForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Dict = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCAmelCase_: Union[str, Any] = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase_: Any = None
UpperCAmelCase_: str = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Dict = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: int = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = config_and_inputs
UpperCAmelCase_: int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A = True
A = False
A = False
A = False
A = False
def __snake_case (self ) -> int:
UpperCAmelCase_: Any = ConvNextModelTester(self )
UpperCAmelCase_: Union[str, Any] = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def __snake_case (self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case (self ) -> Optional[int]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def __snake_case (self ) -> Any:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def __snake_case (self ) -> Dict:
pass
def __snake_case (self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[str] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: Dict = [*signature.parameters.keys()]
UpperCAmelCase_: Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> str:
UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_: Dict = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_: Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCAmelCase_ , UpperCAmelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: str = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_: List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_: int = ConvNextModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> List[str]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = self.default_image_processor
UpperCAmelCase_: Any = prepare_img()
UpperCAmelCase_: Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_: Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCAmelCase_: Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
@require_torch
class _a ( unittest.TestCase , _lowerCAmelCase ):
A = (ConvNextBackbone,) if is_torch_available() else ()
A = ConvNextConfig
A = False
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: Any = ConvNextModelTester(self )
| 82 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _a ( self ) -> float:
return 1E-4
@property
def _a ( self ) -> int:
return 12
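# Back-of-envelope arithmetic (added; assumes the usual HF YOLOS token layout of
# one [CLS] token, then the patch tokens, then the detection tokens configured above):
#   num_patches = (512 // 16) * (864 // 16)  # 32 * 54 = 1728 for the default image_size
#   seq_len = 1 + num_patches + 100          # 1829 positions with 100 detection tokens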
| 62 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
A_ : Union[str, Any] = datasets.logging.get_logger(__name__)
A_ : List[Any] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
A_ : Union[str, Any] = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
A_ : int = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
A_ : List[Any] = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a (datasets.Metric ):
'''simple docstring'''
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
def __A ( self , A__ ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
A__ : Optional[Any] = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
A__ : Tuple = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
A__ : List[str] = self.config_name.upper()
else:
raise KeyError(
F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
A__ : int = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
A__ : Union[str, Any] = score.BleurtScorer(os.path.join(A__ , A__ ) )
def __A ( self , A__ , A__ ):
A__ : int = self.scorer.score(references=A__ , candidates=A__ )
return {"scores": scores}
| 141 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _a :
'''simple docstring'''
def __init__( self , A__ , ):
A__ : Any = parent
A__ : Any = 13
A__ : Optional[Any] = 7
A__ : Union[str, Any] = 30
A__ : str = self.seq_length + self.mem_len
A__ : Dict = 15
A__ : int = True
A__ : Tuple = True
A__ : Union[str, Any] = 99
A__ : Optional[Any] = [10, 50, 80]
A__ : str = 32
A__ : Tuple = 32
A__ : Union[str, Any] = 4
A__ : Optional[Any] = 8
A__ : int = 128
A__ : List[Any] = 2
A__ : List[str] = 2
A__ : int = None
A__ : List[str] = 1
A__ : Union[str, Any] = 0
A__ : List[str] = 3
A__ : int = self.vocab_size - 1
A__ : Optional[Any] = 0.0_1
def __A ( self ):
A__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[Any] = None
if self.use_labels:
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Any = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , A__ , A__ , A__ , A__ ):
A__ : Dict = TFTransfoXLModel(A__ )
A__ , A__ : Tuple = model(A__ ).to_tuple()
A__ : List[str] = {"""input_ids""": input_ids_a, """mems""": mems_a}
A__ , A__ : str = model(A__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , A__ , A__ , A__ , A__ ):
A__ : Optional[int] = TFTransfoXLLMHeadModel(A__ )
A__ , A__ : int = model(A__ ).to_tuple()
A__ : int = {"""input_ids""": input_ids_a, """labels""": lm_labels}
A__ , A__ : Optional[Any] = model(A__ ).to_tuple()
A__ , A__ : Union[str, Any] = model([input_ids_a, mems_a] ).to_tuple()
A__ : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
A__ , A__ : Tuple = model(A__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , A__ , A__ , A__ , A__ ):
A__ : Any = TFTransfoXLForSequenceClassification(A__ )
A__ : Optional[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
A__ : Optional[Any] = self.prepare_config_and_inputs()
((A__) , (A__) , (A__) , (A__)) : List[Any] = config_and_inputs
A__ : int = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase__: Optional[Any] = () if is_tf_available() else ()
UpperCAmelCase__: int = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCAmelCase__: Optional[int] = False
UpperCAmelCase__: Optional[int] = False
UpperCAmelCase__: Tuple = False
UpperCAmelCase__: List[str] = False
def __A ( self , A__ , A__ , A__ , A__ , A__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
A__ : Tuple = TFTransfoXLModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=A__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A__ )
def __A ( self ):
self.model_tester.set_seed()
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A__ )
def __A ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A__ )
def __A ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Union[str, Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
A__ : Any = model_class(A__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
A__ : Optional[Any] = model.get_output_embeddings()
assert isinstance(A__ , tf.keras.layers.Layer )
A__ : Tuple = model.get_bias()
assert name is None
else:
A__ : Dict = model.get_output_embeddings()
assert x is None
A__ : int = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[Any] = TFTransfoXLModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
A__ : List[Any] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
        A__ : Tuple = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.int64 )  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
A__ : Dict = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
A__ : Any = model.generate(A__ , max_length=200 , do_sample=A__ )
self.assertListEqual(output_ids[0].numpy().tolist() , A__ )
| 141 | 1 |
def selection_sort(collection: list) -> list:
    """Sort ``collection`` in place by repeatedly selecting the smallest remaining item."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap the smallest remaining element into position i
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
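# Illustrative self-check (added, not in the original): the sort should agree with
# the built-in `sorted` on arbitrary inputs.
def _check_selection_sort(trials: int = 100) -> None:
    import random
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert selection_sort(list(data)) == sorted(data)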
if __name__ == "__main__":
lowerCamelCase : Tuple = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted)) | 233 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase : Optional[int] = TapasConfig.from_json_file(lowerCAmelCase_ )
# set absolute/relative position embeddings parameter
__lowercase : Optional[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase : Union[str, Any] = TapasForQuestionAnswering(config=lowerCAmelCase_ )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase : List[Any] = 4
__lowercase : Union[str, Any] = True
# hparam_utils.py hparams
__lowercase : Any = 0.664_694
__lowercase : Tuple = 0.207_951
__lowercase : Dict = 0.121_194
__lowercase : List[str] = True
__lowercase : str = True
__lowercase : Dict = False
__lowercase : Tuple = 0.0_352_513
__lowercase : List[Any] = TapasForQuestionAnswering(config=lowerCAmelCase_ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase : Optional[int] = 4
__lowercase : int = False
# hparam_utils.py hparams
__lowercase : Tuple = 36.4_519
__lowercase : str = 0.903_421
__lowercase : List[Any] = 222.088
__lowercase : Union[str, Any] = True
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Optional[Any] = 0.763_141
__lowercase : str = TapasForQuestionAnswering(config=lowerCAmelCase_ )
elif task == "TABFACT":
__lowercase : List[Any] = TapasForSequenceClassification(config=lowerCAmelCase_ )
elif task == "MLM":
__lowercase : Optional[int] = TapasForMaskedLM(config=lowerCAmelCase_ )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase : Dict = TapasModel(config=lowerCAmelCase_ )
else:
raise ValueError(F"Task {task} not supported." )
print(F"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model (weights and configuration)
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(lowerCAmelCase_ )
# Save tokenizer files
print(F"Save tokenizer files to {pytorch_dump_path}" )
__lowercase : Any = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(lowerCAmelCase_ )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
    )
| 233 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A__ ( __snake_case ):
_UpperCAmelCase :Optional[int] = 'data2vec-audio'
def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1e-5 , A_="gelu" , A_=(512, 512, 512, 512, 512, 512, 512) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=16 , A_=19 , A_=5 , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_="sum" , A_=False , A_=False , A_=256 , A_=(512, 512, 512, 512, 1500) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=512 , A_=0 , A_=1 , A_=2 , A_=False , A_=3 , A_=2 , A_=3 , A_=None , **A_ , ):
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : Union[str, Any] = feat_extract_activation
UpperCamelCase : str = list(A_ )
UpperCamelCase : Union[str, Any] = list(A_ )
UpperCamelCase : Optional[int] = list(A_ )
UpperCamelCase : int = conv_bias
UpperCamelCase : Optional[int] = num_conv_pos_embeddings
UpperCamelCase : Optional[Any] = num_conv_pos_embedding_groups
UpperCamelCase : List[Any] = conv_pos_kernel_size
UpperCamelCase : str = len(self.conv_dim )
UpperCamelCase : Optional[Any] = num_hidden_layers
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : Any = hidden_act
UpperCamelCase : List[Any] = num_attention_heads
UpperCamelCase : List[Any] = hidden_dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : Optional[int] = activation_dropout
UpperCamelCase : Optional[int] = feat_proj_dropout
UpperCamelCase : Tuple = final_dropout
UpperCamelCase : List[str] = layerdrop
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : Tuple = initializer_range
UpperCamelCase : Dict = vocab_size
UpperCamelCase : List[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase : str = mask_time_prob
UpperCamelCase : Dict = mask_time_length
UpperCamelCase : Dict = mask_time_min_masks
UpperCamelCase : Optional[Any] = mask_feature_prob
UpperCamelCase : Union[str, Any] = mask_feature_length
UpperCamelCase : Optional[int] = mask_feature_min_masks
# ctc loss
UpperCamelCase : Any = ctc_loss_reduction
UpperCamelCase : Optional[Any] = ctc_zero_infinity
# adapter
UpperCamelCase : Tuple = add_adapter
UpperCamelCase : Tuple = adapter_kernel_size
UpperCamelCase : List[Any] = adapter_stride
UpperCamelCase : List[str] = num_adapter_layers
UpperCamelCase : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase : Tuple = list(A_ )
UpperCamelCase : Tuple = list(A_ )
UpperCamelCase : Dict = list(A_ )
UpperCamelCase : Union[str, Any] = xvector_output_dim
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return math.prod(self.conv_stride )
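# Illustrative check (added, not part of the original file): with the default
# strides above the feature extractor emits one frame per 320 input samples,
# i.e. 50 feature frames per second of 16 kHz audio.
if __name__ == "__main__":
    _default_stride = (5, 2, 2, 2, 2, 2, 2)
    assert math.prod(_default_stride) == 320
    print(16_000 / math.prod(_default_stride))  # 50.0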
| 140 |
class A__ : # Public class to implement a graph
def __init__( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = row
UpperCamelCase : Any = col
UpperCamelCase : Optional[Any] = graph
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Any = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCamelCase : Dict = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCamelCase : Any = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , A_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , A_ )
def __UpperCamelCase( self ): # And finally, count all islands.
'''simple docstring'''
UpperCamelCase : str = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCamelCase : int = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(A_ , A_ , A_ )
count += 1
return count
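# Hedged standalone alternative (added for illustration): the same 8-connected
# island count via an explicit stack-based flood fill, without the visited-matrix
# plumbing of the class above.
def count_islands(grid):
    rows = len(grid)
    cols = len(grid[0]) if rows else 0
    seen = set()
    count = 0
    for start_i in range(rows):
        for start_j in range(cols):
            if grid[start_i][start_j] == 1 and (start_i, start_j) not in seen:
                count += 1  # found a new, unvisited island
                stack = [(start_i, start_j)]
                while stack:
                    i, j = stack.pop()
                    if not (0 <= i < rows and 0 <= j < cols):
                        continue
                    if (i, j) in seen or grid[i][j] != 1:
                        continue
                    seen.add((i, j))
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            if di or dj:
                                stack.append((i + di, j + dj))
    return count
# e.g. count_islands([[1, 1, 0], [0, 0, 0], [0, 0, 1]]) == 2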
| 140 | 1 |
'''simple docstring'''
import cva
import numpy as np
class lowerCAmelCase__ :
def __init__( self : Tuple , lowerCamelCase__ : float , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
if k in (0.0_4, 0.0_6):
_UpperCAmelCase : Dict = k
_UpperCAmelCase : Optional[int] = window_size
else:
raise ValueError("invalid k value" )
def __str__( self : Union[str, Any] ) ->str:
'''simple docstring'''
return str(self.k )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : str ) ->tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
_UpperCAmelCase : Tuple = cva.imread(lowerCamelCase__ , 0 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = img.shape
_UpperCAmelCase : list[list[int]] = []
_UpperCAmelCase : Optional[Any] = img.copy()
_UpperCAmelCase : Optional[int] = cva.cvtColor(lowerCamelCase__ , cva.COLOR_GRAY2RGB )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = np.gradient(lowerCamelCase__ )
_UpperCAmelCase : Tuple = dx**2
_UpperCAmelCase : str = dy**2
_UpperCAmelCase : int = dx * dy
_UpperCAmelCase : Dict = 0.0_4
_UpperCAmelCase : str = self.window_size // 2
for y in range(lowerCamelCase__ , h - offset ):
for x in range(lowerCamelCase__ , w - offset ):
_UpperCAmelCase : List[str] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase : List[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase : Tuple = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase : Optional[Any] = (wxx * wyy) - (wxy**2)
_UpperCAmelCase : Optional[Any] = wxx + wyy
_UpperCAmelCase : List[Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase__ = HarrisCorner(0.04, 3)
lowerCamelCase__ ,lowerCamelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
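# Hedged vectorised sketch (added; assumes SciPy is available): the same response
# map R = det(M) - k * trace(M)**2 for every pixel at once. `uniform_filter`
# averages instead of summing over the window, which only scales R by a constant
# factor and leaves the ranking of corner responses unchanged.
def harris_response(img, k=0.04, window=3):
    from scipy.ndimage import uniform_filter  # assumed dependency
    dy, dx = np.gradient(img.astype(float))
    wxx = uniform_filter(dx * dx, size=window)
    wyy = uniform_filter(dy * dy, size=window)
    wxy = uniform_filter(dx * dy, size=window)
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2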
| 234 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCamelCase__ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCamelCase__ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\nExamples:\n\n    Example 1:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...         \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results["google_bleu"], 2))\n    0.44\n\n    Example 2:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...          \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...          \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...         \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results["google_bleu"], 2))\n    0.61\n\n    Example 3:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...          \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...          \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...         \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n    >>> print(round(results["google_bleu"], 2))\n    0.53\n\n    Example 4:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...          \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...          \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...         \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n    >>> print(round(results["google_bleu"], 2))\n    0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def lowerCAmelCase__ ( self : int ) ->MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : List[List[List[str]]] , lowerCamelCase__ : List[List[str]] , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 4 , ) ->Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCamelCase__ , hypotheses=lowerCamelCase__ , min_len=lowerCamelCase__ , max_len=lowerCamelCase__ )
}
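# Illustrative direct call (added): the compute method above is a thin wrapper, so
# the same number comes straight from NLTK:
#   score = gleu_score.corpus_gleu(
#       list_of_references=[[["hello", "there"]]],
#       hypotheses=[["hello", "there"]],
#       min_len=1,
#       max_len=4,
#   )  # 1.0 for an exact match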
| 234 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
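# Hedged helper (added for illustration; see the conv kernels and strides configured
# in the class below): the usual wav2vec2-style length rule for the feature
# extractor -- each conv layer maps a length L to (L - kernel) // stride + 1.
def _feat_extract_output_length(input_length, kernels=(10, 3, 3, 3, 3, 2, 2), strides=(5, 2, 2, 2, 2, 2, 2)):
    for kernel, stride in zip(kernels, strides):
        input_length = (input_length - kernel) // stride + 1
    return input_length
# _feat_extract_output_length(16_000) == 49 frames for one second of 16 kHz audio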
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 307 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : Tuple = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = "bridgetower_vision_model"
def __init__( self : Union[str, Any], lowerCamelCase : List[str]=768, lowerCamelCase : str=12, lowerCamelCase : List[Any]=3, lowerCamelCase : Union[str, Any]=16, lowerCamelCase : Dict=288, lowerCamelCase : List[Any]=1, lowerCamelCase : Optional[int]=1E-05, lowerCamelCase : Tuple=False, lowerCamelCase : List[Any]=True, lowerCamelCase : Dict=False, **lowerCamelCase : Optional[int], )-> Tuple:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : Dict =hidden_size
lowerCamelCase__ : Dict =num_hidden_layers
lowerCamelCase__ : Tuple =num_channels
lowerCamelCase__ : Union[str, Any] =patch_size
lowerCamelCase__ : Dict =image_size
lowerCamelCase__ : str =initializer_factor
lowerCamelCase__ : Optional[int] =layer_norm_eps
lowerCamelCase__ : Optional[int] =stop_gradient
lowerCamelCase__ : List[Any] =share_layernorm
lowerCamelCase__ : Optional[int] =remove_last_layer
@classmethod
def snake_case ( cls : str, lowerCamelCase : Union[str, os.PathLike], **lowerCamelCase : Dict )-> "PretrainedConfig":
lowerCamelCase__ : Tuple =cls.get_config_dict(lowerCamelCase, **lowerCamelCase )
if config_dict.get('''model_type''' ) == "bridgetower":
            lowerCamelCase__ : Union[str, Any] =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase, **lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = "bridgetower_text_model"
def __init__( self : Optional[Any], lowerCamelCase : List[Any]=5_0265, lowerCamelCase : Union[str, Any]=768, lowerCamelCase : Union[str, Any]=12, lowerCamelCase : Tuple=12, lowerCamelCase : Union[str, Any]=1, lowerCamelCase : Tuple=3072, lowerCamelCase : List[Any]="gelu", lowerCamelCase : Dict=0.1, lowerCamelCase : List[Any]=0.1, lowerCamelCase : Tuple=514, lowerCamelCase : List[str]=1, lowerCamelCase : Tuple=1E-05, lowerCamelCase : Tuple=1, lowerCamelCase : List[Any]=0, lowerCamelCase : int=2, lowerCamelCase : Any="absolute", lowerCamelCase : Union[str, Any]=True, **lowerCamelCase : Tuple, )-> Optional[Any]:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : Optional[Any] =vocab_size
lowerCamelCase__ : Tuple =hidden_size
lowerCamelCase__ : List[str] =num_hidden_layers
lowerCamelCase__ : int =num_attention_heads
lowerCamelCase__ : Optional[Any] =hidden_act
lowerCamelCase__ : Optional[Any] =initializer_factor
lowerCamelCase__ : Optional[int] =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_dropout_prob
lowerCamelCase__ : Optional[Any] =attention_probs_dropout_prob
lowerCamelCase__ : Any =max_position_embeddings
lowerCamelCase__ : Union[str, Any] =type_vocab_size
lowerCamelCase__ : Tuple =layer_norm_eps
lowerCamelCase__ : Union[str, Any] =position_embedding_type
lowerCamelCase__ : int =use_cache
lowerCamelCase__ : Tuple =pad_token_id
lowerCamelCase__ : Union[str, Any] =bos_token_id
lowerCamelCase__ : Union[str, Any] =eos_token_id
@classmethod
def snake_case ( cls : List[Any], lowerCamelCase : Union[str, os.PathLike], **lowerCamelCase : List[str] )-> "PretrainedConfig":
lowerCamelCase__ : Any =cls.get_config_dict(lowerCamelCase, **lowerCamelCase )
if config_dict.get('''model_type''' ) == "bridgetower":
lowerCamelCase__ : Union[str, Any] =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase, **lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = "bridgetower"
def __init__( self : Any, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Union[str, Any]="gelu", lowerCamelCase : Dict=768, lowerCamelCase : int=1, lowerCamelCase : Optional[int]=1E-05, lowerCamelCase : Dict=False, lowerCamelCase : Tuple="add", lowerCamelCase : int=12, lowerCamelCase : Tuple=6, lowerCamelCase : int=False, lowerCamelCase : Dict=False, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Tuple=None, **lowerCamelCase : str, )-> Optional[Any]:
lowerCamelCase__ : Any =kwargs.pop('''text_config_dict''', lowerCamelCase )
lowerCamelCase__ : Tuple =kwargs.pop('''vision_config_dict''', lowerCamelCase )
super().__init__(**lowerCamelCase )
lowerCamelCase__ : Any =share_cross_modal_transformer_layers
lowerCamelCase__ : int =hidden_act
lowerCamelCase__ : Optional[int] =hidden_size
lowerCamelCase__ : Optional[int] =initializer_factor
lowerCamelCase__ : List[Any] =layer_norm_eps
lowerCamelCase__ : Optional[int] =share_link_tower_layers
lowerCamelCase__ : Tuple =link_tower_type
lowerCamelCase__ : List[str] =num_attention_heads
lowerCamelCase__ : List[str] =num_hidden_layers
lowerCamelCase__ : List[str] =tie_word_embeddings
lowerCamelCase__ : Optional[Any] =init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase__ : Optional[Any] ={}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
lowerCamelCase__ : Union[str, Any] ={}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
lowerCamelCase__ : List[str] =BridgeTowerTextConfig(**lowerCamelCase )
lowerCamelCase__ : Tuple =BridgeTowerVisionConfig(**lowerCamelCase )
@classmethod
def snake_case ( cls : List[str], lowerCamelCase : BridgeTowerTextConfig, lowerCamelCase : BridgeTowerVisionConfig, **lowerCamelCase : Optional[Any] )-> Any:
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCamelCase )
def snake_case ( self : int )-> Tuple:
lowerCamelCase__ : Optional[Any] =copy.deepcopy(self.__dict__ )
lowerCamelCase__ : Dict =self.text_config.to_dict()
lowerCamelCase__ : Any =self.vision_config.to_dict()
lowerCamelCase__ : List[Any] =self.__class__.model_type
return output
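# Hedged usage sketch (added; assumes a transformers install exposing these classes
# and the `from_text_vision_configs` constructor under their public names):
#   from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
#   text_config = BridgeTowerTextConfig()
#   vision_config = BridgeTowerVisionConfig()
#   config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()["text_config"]["hidden_size"]  # 768 by default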
| 238 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Dict = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __a (lowerCamelCase ):
__a : Tuple = "roc_bert"
def __init__( self : Union[str, Any] , __magic_name__ : List[str]=3_05_22 , __magic_name__ : Tuple=7_68 , __magic_name__ : Any=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : Union[str, Any]=30_72 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Any=5_12 , __magic_name__ : str=2 , __magic_name__ : Any=0.0_2 , __magic_name__ : Dict=1E-12 , __magic_name__ : int=True , __magic_name__ : Optional[int]=0 , __magic_name__ : str="absolute" , __magic_name__ : Tuple=None , __magic_name__ : Any=True , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=7_68 , __magic_name__ : List[Any]=9_10 , __magic_name__ : Tuple=5_12 , __magic_name__ : Dict=2_48_58 , __magic_name__ : Any=True , **__magic_name__ : Union[str, Any] , ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : str = layer_norm_eps
UpperCAmelCase_ : Tuple = use_cache
UpperCAmelCase_ : Optional[int] = enable_pronunciation
UpperCAmelCase_ : Union[str, Any] = enable_shape
UpperCAmelCase_ : List[str] = pronunciation_embed_dim
UpperCAmelCase_ : List[str] = pronunciation_vocab_size
UpperCAmelCase_ : int = shape_embed_dim
UpperCAmelCase_ : Optional[int] = shape_vocab_size
UpperCAmelCase_ : Optional[Any] = concat_input
UpperCAmelCase_ : Dict = position_embedding_type
UpperCAmelCase_ : Union[str, Any] = classifier_dropout
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
| 125 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowerCamelCase :List[Any] = None
lowerCamelCase :Tuple = logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :int = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowerCamelCase :List[Any] = {
'''camembert-base''': 5_1_2,
}
lowerCamelCase :List[str] = '''▁'''
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Dict = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : List[Any] = CamembertTokenizer
def __init__(self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=["<s>NOTUSED", "</s>NOTUSED"] , **lowercase , ):
# Mask token behave like a normal word, i.e. include the space before it
A_ : str = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , **lowercase , )
A_ : Any = vocab_file
A_ : Union[str, Any] = False if not self.vocab_file else True
def _a (self , lowercase , lowercase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a (self , lowercase , lowercase = None ):
A_ : int = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
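    # Layout produced by the two methods above (the RoBERTa/CamemBERT convention),
    # illustrated with placeholder ids (0 = <s>, 2 = </s>):
    #   single sequence A:   [0] + A + [2]
    #   sequence pair A, B:  [0] + A + [2, 2] + B + [2]
    # and the corresponding token type ids are all zeros in both cases.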
def _a (self , lowercase , lowercase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : Optional[int] = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
        return (out_vocab_file,)
| 135 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _lowerCAmelCase :
@property
def _a (self ):
return self.get_dummy_input()
@property
def _a (self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def _a (self , lowercase=True , lowercase=False , lowercase=False , lowercase=False , ):
A_ : List[str] = 4
A_ : int = 32
A_ : Optional[int] = (32, 32)
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : int = torch.device(lowercase )
A_ : int = (batch_size, num_channels) + sizes
A_ : Optional[int] = randn_tensor(lowercase , generator=lowercase , device=lowercase )
A_ : Union[str, Any] = {"""hidden_states""": hidden_states}
if include_temb:
A_ : str = 128
A_ : List[Any] = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
A_ : List[str] = torch.manual_seed(1 )
A_ : int = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
A_ : List[str] = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
A_ : Dict = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def _a (self ):
A_ : Tuple = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A_ : Any = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward() | 135 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
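    # All export tests below run against ONNX opset version 12.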
@require_tf
@slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)
@require_torch
@slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
@require_torch
@slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel
        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
@require_tf
@slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix()) | 331 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
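    # The three dicts returned above are routed to `preprocess`, `_forward` and `postprocess` respectively.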
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records | 331 | 1 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
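    # Illustrative (hypothetical) failure mode: instantiating this placeholder without the
    # `note_seq` package installed raises an ImportError from `requires_backends`.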
| 369 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
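# For example, feature_extractor_class_from_name("ViTFeatureExtractor") imports
# transformers.models.vit and returns its ViTFeatureExtractor class (vision deps permitting).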
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}" )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_lowerCamelCase : Union[str, Any] = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( _UpperCAmelCase = "mumbai" ):
"""simple docstring"""
A_ : Union[str, Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
A_ : Dict = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
A_ : Dict = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'Job {i:>2} is {job[0]} at {job[1]}')
| 167 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
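    # i.e. a single sequence becomes A + [<sep>] + [<cls>]; XLNet appends CLS at the end rather than prepending it.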
    def get_special_tokens_mask(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=token_ids_a, token_ids_b=token_ids_b, already_has_special_tokens=True )
        if token_ids_b is not None:
            return ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1, 1]
        return ([0] * len(token_ids_a)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 167 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"
    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards) )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
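    # The helper below draws a node's level from a geometric distribution: each extra
    # level is kept with probability `p`, capped at `max_level`.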
    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search() -> None:
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")
    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests() -> None:
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main() -> None:
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 332 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits", [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ], )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception", [
        ("paws", None, ValueError),
    ], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected", [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ], )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config", [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ], )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits", [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ], )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception", [
        ("paws", None, ValueError),
    ], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 332 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=token_ids_a, token_ids_b=token_ids_b, already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a)) + ([0] * len(token_ids_b)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 154 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
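    # The decoder variant extends the standard inputs with encoder activations and an encoder attention mask.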
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 147 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
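
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal end-to-end run, assuming the transformers agent tooling is installed;
# saving with `soundfile` is just one option and is an assumption of this sketch.
#
#   from transformers.tools import TextToSpeechTool
#   import soundfile as sf
#
#   reader = TextToSpeechTool()
#   waveform = reader("Hello world")  # 1-D torch.Tensor at SpeechT5's 16 kHz rate
#   sf.write("hello.wav", waveform.numpy(), samplerate=16000)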
| 40 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase__ = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase__ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits `x` into sentences with nltk and rejoins them newline-separated."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
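
# Illustrative behavior (assumes the nltk "punkt" data downloaded above):
#   add_newline_to_end_of_each_sentence("Hello there. General Kenobi!")
#   -> "Hello there.\nGeneral Kenobi!"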
| 40 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
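
# Example (illustrative): tests wrapped with the `slow` decorator defined below only run
# when the flag is set, e.g. `RUN_SLOW=yes python -m pytest tests/`.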
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW is set"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
    data at the start of a test, and then destroys it at the end of the TestCase.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a temporary directory and stores it in `cls.tmpdir`"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Removes `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroys all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that resets the accelerator state at the end of every test.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """
    A TestCase class designed to dynamically add mocks that should be used in every test.
    """

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
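
# --- Usage sketch (illustrative, not part of the original module) ---
# The decorators above gate plain unittest tests on hardware/library availability, and
# `run_command` shells out with readable failures:
#
#   import unittest
#
#   class ExampleTest(unittest.TestCase):
#       @require_cuda
#       def test_gpu_only(self):
#           stdout = run_command(["nvidia-smi", "-L"], return_stdout=True)
#           self.assertIn("GPU", stdout)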
| 7 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline: extracts the hidden states from the base transformer, which can be used as features
    in downstream tasks.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
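
# --- Usage sketch (illustrative) ---
# The pipeline is normally constructed through the `pipeline` factory:
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-cased")
#   features = extractor("Transformers is great!")  # nested list: [batch, tokens, hidden_size]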
| 7 | 1 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def top(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
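
# Illustrative check of the queue's update semantics (not part of the original file):
# re-`put`ting an item replaces its priority instead of duplicating the entry.
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((1, 1), 3)
#   pq.put((0, 0), 1)
#   assert pq.minkey() == 1 and pq.top_show() == (0, 0)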
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by the time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
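
# In multi-heuristic A*, the anchor search (i = 0) uses the consistent heuristic while each
# inadmissible search (i > 0) uses one of the others; `key` is the usual weighted A* priority
# f_i(s) = g(s) + W1 * h_i(s, goal).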
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 369 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E img2img pipeline."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            print()
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 156 | 0 |
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
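
# --- Minimal smoke test (illustrative; all sizes below are made up) ---
#   crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
#   hidden = torch.randn(4, 8, 32)                 # (batch, seq_len, d_proj)
#   labels = torch.randint(0, 1000, (4, 8))
#   nll = crit(hidden, labels)                     # flat per-token negative log-likelihoods
#   logprobs = crit.log_prob(hidden.view(-1, 32))  # (batch * seq_len, n_token)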
| 136 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = "T5Config"
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
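
# Usage sketch (illustrative): these classes are thin aliases over their T5 counterparts,
# so e.g. `TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")` loads an mT5
# checkpoint through the T5 modeling code.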
| 136 | 1 |
"""simple docstring"""
lowerCamelCase__ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCamelCase__ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCamelCase__ = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date, using the Doomsday algorithm."""
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
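
# Illustrative check (not in the original file): 24 October 2020 fell on a Saturday.
#   get_week_day(2020, 10, 24)  -> 'Saturday'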
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 182 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 182 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are indicated by `n_copies`."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns true if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
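
# Illustrative behavior (not part of the original script): the split keeps the EOF marker
# as its own element, so dropping the last two pieces trims the trailing block.
#
#   remove_last_block("def f():\n    return 1\nclass Foo:\n    pass")
#   -> "def f():\n    return 1"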
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple codes for each task in the dataset, using the accelerator to distribute processing."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 88 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCamelCase_ (__A ):
__magic_name__ = 42
__magic_name__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class UpperCamelCase_ (__A ):
__magic_name__ = 42
__magic_name__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ (__A ):
__magic_name__ = ['''pixel_values''']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 255 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ : Any = size if size is not None else {"shortest_edge": 256}
UpperCAmelCase_ : List[str] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
UpperCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" )
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : int = size
UpperCAmelCase_ : Optional[int] = resample
UpperCAmelCase_ : Tuple = do_center_crop
UpperCAmelCase_ : Any = crop_size
UpperCAmelCase_ : List[str] = do_rescale
UpperCAmelCase_ : Dict = rescale_factor
UpperCAmelCase_ : str = do_normalize
UpperCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Any , ) -> np.ndarray:
UpperCAmelCase_ : Any = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase_ : List[Any] = get_resize_output_image_size(lowerCAmelCase_ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
UpperCAmelCase_ : List[str] = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Tuple , ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : Optional[int] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : List[Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" )
UpperCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Optional[Any] = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[Any] = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : Union[str, Any] = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : Tuple = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : Any = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
UpperCAmelCase_ : Any = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
UpperCAmelCase_ : int = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Tuple] = None ) -> Optional[int]:
UpperCAmelCase_ : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[int] = target_sizes.numpy()
UpperCAmelCase_ : Dict = []
for idx in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCAmelCase_ )
UpperCAmelCase_ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
UpperCAmelCase_ : Tuple = logits.argmax(dim=1 )
UpperCAmelCase_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
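# Usage note for the post-processing method above: given model `outputs` and
# `target_sizes=[(height, width), ...]`, it returns one (height, width) map of
# predicted class indices per image; when `target_sizes` is None it instead
# returns the argmax over the logits' native spatial grid.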
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) == 0:
return []
_snake_case, _snake_case = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
_snake_case = int(max_value - min_value ) + 1
_snake_case = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]
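# Note on memory: the bucket count equals the value range (max - min + 1), so
# widely spread inputs allocate many empty buckets; e.g. bucket_sort([0, 1000])
# creates 1001 buckets for just two elements.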
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
'''simple docstring'''
import argparse
import datetime
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int ={
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if len(UpperCamelCase__ ) != 1_0:
        raise ValueError('''Must be 10 characters long''' )
# Get month
SCREAMING_SNAKE_CASE__ : int =int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 1_3:
raise ValueError('''Month must be between 1 - 12''' )
SCREAMING_SNAKE_CASE__ : str =date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get day
SCREAMING_SNAKE_CASE__ : int =int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 3_2:
raise ValueError('''Date must be between 1 - 31''' )
# Get second separator
SCREAMING_SNAKE_CASE__ : str =date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get year
SCREAMING_SNAKE_CASE__ : int =int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 4_5 < y < 8_5_0_0:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''' )
# Get datetime obj for validation
SCREAMING_SNAKE_CASE__ : Any =datetime.date(int(UpperCamelCase__ ), int(UpperCamelCase__ ), int(UpperCamelCase__ ) )
# Start math
if m <= 2:
SCREAMING_SNAKE_CASE__ : List[Any] =y - 1
SCREAMING_SNAKE_CASE__ : List[Any] =m + 1_2
# maths var
SCREAMING_SNAKE_CASE__ : int =int(str(UpperCamelCase__ )[:2] )
SCREAMING_SNAKE_CASE__ : int =int(str(UpperCamelCase__ )[2:] )
SCREAMING_SNAKE_CASE__ : int =int(2.6 * m - 5.3_9 )
SCREAMING_SNAKE_CASE__ : int =int(c / 4 )
SCREAMING_SNAKE_CASE__ : int =int(k / 4 )
SCREAMING_SNAKE_CASE__ : int =int(d + k )
SCREAMING_SNAKE_CASE__ : int =int(t + u + v + x )
SCREAMING_SNAKE_CASE__ : int =int(z - (2 * c) )
SCREAMING_SNAKE_CASE__ : int =round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
# Response
SCREAMING_SNAKE_CASE__ : str =f"Your date {date_input}, is a {days[str(UpperCamelCase__ )]}!"
return response
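# Worked example (hand-checked): for "01-31-2010" the month shift gives m=13 and
# y=2009, so c=20, k=9, t=int(2.6*13 - 5.39)=28, u=5, v=2, x=31+9=40, z=75,
# w=75-40=35 and f=35 % 7=0, i.e. days["0"] == "Sunday" -- which matches the
# calendar for January 31, 2010.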
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
a_ = parser.parse_args()
    zeller(args.date_input)
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """facebook/bart-large-mnli"""
snake_case_ = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
snake_case_ = """text_classifier"""
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSequenceClassification
snake_case_ = ["""text""", ["""text"""]]
snake_case_ = ["""text"""]
def __magic_name__ ( self : Any ) -> Any:
super().setup()
SCREAMING_SNAKE_CASE__ : int =self.model.config
SCREAMING_SNAKE_CASE__ : int =-1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
SCREAMING_SNAKE_CASE__ : Any =int(__lowercase )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __magic_name__ ( self : str , __lowercase : Any , __lowercase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict =labels
return self.pre_processor(
[text] * len(__lowercase ) , [F"This example is {label}" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __magic_name__ ( self : List[Any] , __lowercase : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =outputs.logits
SCREAMING_SNAKE_CASE__ : Any =torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowercase__ : List[str] = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
lowercase__ : List[Any] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowercase__ : Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : Tuple = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowercase__ : Optional[Any] = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
lowercase__ : Tuple = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : Union[str, Any] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
lowercase__ : Dict = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
lowercase__ : Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
lowercase__ : int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
lowercase__ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
lowercase__ : List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
lowercase__ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
lowercase__ : int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
lowercase__ : Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
lowercase__ : Tuple = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
lowercase__ : Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
lowercase__ : Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
lowercase__ : Tuple = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
lowercase__ : Any = ''''''
lowercase__ : Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
lowercase__ : List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowercase__ : str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
assert ReadMe.from_string(_A , _A ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
with pytest.raises(_A , match=re.escape(expected_error.format(path="root" ) ) ):
snake_case_ = ReadMe.from_string(_A , _A )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
with pytest.raises(_A , match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(_A , _A )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
ReadMe.from_string(_A , _A , suppress_parsing_errors=_A )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = Path(_A ) / 'README.md'
with open(_A , "w+" ) as readme_file:
readme_file.write(_A )
snake_case_ = ReadMe.from_readme(_A , _A ).to_dict()
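        # from_readme wraps the parsed file in a root node named after the file
        # path, with empty text; the parsed section tree sits under
        # "subsections", which is what the assertions below verify.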
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = Path(_A ) / 'README.md'
with open(_A , "w+" ) as readme_file:
readme_file.write(_A )
snake_case_ = expected_error.format(path=_A )
with pytest.raises(_A , match=re.escape(_A ) ):
snake_case_ = ReadMe.from_readme(_A , _A )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = Path(_A ) / 'README.md'
with open(_A , "w+" ) as readme_file:
readme_file.write(_A )
snake_case_ = expected_error.format(path=_A )
with pytest.raises(_A , match=re.escape(_A ) ):
ReadMe.from_readme(_A , _A )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = Path(_A ) / 'README.md'
with open(_A , "w+" ) as readme_file:
readme_file.write(_A )
ReadMe.from_readme(_A , _A , suppress_parsing_errors=_A )
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]:
__magic_name__ : str = {}
if "candidate_labels" in kwargs:
__magic_name__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__magic_name__ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int:
__magic_name__ : Dict = load_image(_A )
__magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
__magic_name__ : Optional[Any] = candidate_labels
__magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels]
__magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__magic_name__ : Optional[Any] = [text_inputs]
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str:
__magic_name__ : str = model_inputs.pop('candidate_labels' )
__magic_name__ : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__magic_name__ : Dict = text_inputs[0]
else:
# Batching case.
__magic_name__ : Optional[Any] = text_inputs[0][0]
__magic_name__ : List[Any] = self.model(**_A , **_A )
__magic_name__ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]:
__magic_name__ : Tuple = model_outputs.pop('candidate_labels' )
__magic_name__ : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
__magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
__magic_name__ : Tuple = probs.tolist()
if not isinstance(_A , _A ):
__magic_name__ : Any = [scores]
elif self.framework == "tf":
__magic_name__ : Any = stable_softmax(_A , axis=-1 )
__magic_name__ : Dict = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Union[str, Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(_A , _A ) , key=lambda x : -x[0] )
]
        return result
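# Usage sketch (illustrative file name and scores, not from this module):
# pipe = pipeline("zero-shot-image-classification")
# pipe("cat.png", candidate_labels=["cat", "dog"])
# -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]
# The list is sorted by descending score, as computed in postprocess above.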
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase_ ( A__ ) -> Tuple:
"""simple docstring"""
    snake_case = filter(lambda p : p.requires_grad , model.parameters() )
snake_case = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_A = logging.getLogger(__name__)
def lowercase_ ( A__ , A__ ) -> Any:
"""simple docstring"""
if metric == "rouge2":
snake_case = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
snake_case = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
snake_case = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
            F'seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this'
" function." )
snake_case = ModelCheckpoint(
dirpath=A__ , filename=A__ , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase_ ( A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
return EarlyStopping(
monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=A__ , verbose=A__ , )
class lowerCamelCase ( pl.Callback ):
def UpperCAmelCase(self : Dict , _A : Optional[Any] , _A : Dict ) -> Dict:
snake_case = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def UpperCAmelCase(self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : str=True ) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
snake_case = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
snake_case = Path(pl_module.hparams.output_dir )
if type_path == "test":
snake_case = od / "test_results.txt"
snake_case = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
snake_case = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
snake_case = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , "a+" ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
snake_case = metrics[key]
if isinstance(_A , torch.Tensor ):
snake_case = val.item()
snake_case = f'{key}: {val:.6f}\n'
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
snake_case = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_A )
@rank_zero_only
def UpperCAmelCase(self : int , _A : str , _A : Union[str, Any] ) -> Union[str, Any]:
try:
snake_case = pl_module.model.model.num_parameters()
except AttributeError:
snake_case = pl_module.model.num_parameters()
snake_case = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCAmelCase(self : List[Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[str]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , "test" )
@rank_zero_only
def UpperCAmelCase(self : Any , _A : pl.Trainer , _A : Union[str, Any] ) -> Dict:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_A = logging.getLogger(__name__)
_A = "pytorch_model.bin"
@dataclasses.dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
UpperCAmelCase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "A csv or a json file containing the validation data."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "The name of the task to train on."} , )
UpperCAmelCase__ : Optional[List[str]] = dataclasses.field(
default=A_ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
        default=1_00 , metadata={"help": "Maximum number of self-training iterations."} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=A_ , metadata={"help": "Random seed for initialization."} , )
def lowercase_ ( A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
snake_case = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
        snake_case = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
snake_case = int(eval_result * len(A__ ) )
print(A__ )
snake_case = dataset.sort("probability" , reverse=A__ )
snake_case = dataset.select(range(A__ ) )
snake_case = dataset.remove_columns(["label", "probability"] )
snake_case = dataset.rename_column("prediction" , "label" )
    snake_case = dataset.map(lambda example : {"label": idalabel[example["label"]]} )
snake_case = dataset.shuffle(seed=args.seed )
snake_case = os.path.join(A__ , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(A__ , index=A__ )
else:
dataset.to_json(A__ )
def lowercase_ ( A__ , A__ , A__ , A__ , **A__ ) -> List[Any]:
"""simple docstring"""
snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case = STModelArguments(model_name_or_path=A__ )
snake_case = STDataArguments(train_file=A__ , infer_file=A__ )
snake_case = STTrainingArguments(output_dir=A__ )
snake_case = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A__ ).items():
setattr(A__ , A__ , A__ )
for key, value in kwargs.items():
if hasattr(A__ , A__ ):
setattr(A__ , A__ , A__ )
# Sanity checks
snake_case = {}
snake_case = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case = args.train_file
snake_case = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case = args.eval_file
for key in data_files:
snake_case = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
snake_case = extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
snake_case = F'{args.output_dir}/self-train_iter-{{}}'.format
snake_case = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A__ )
os.makedirs(A__ , exist_ok=A__ )
accelerator.wait_for_everyone()
snake_case = None
snake_case = None
snake_case = 0
snake_case = False
# Show the progress bar
snake_case = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case = data_dir_format(A__ )
assert os.path.exists(A__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case = os.path.join(A__ , "stage-1" )
snake_case = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A__ , A__ ):
arguments_dict.update({key: value} )
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 1." , A__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case = os.path.join(A__ , "best-checkpoint" )
snake_case = os.path.join(A__ , "stage-2" )
# Update arguments_dict
snake_case = model_path
snake_case = data_files["train"]
snake_case = current_output_dir
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 2." , A__ )
snake_case = iteration
snake_case = data_dir_format(iteration + 1 )
snake_case = AutoConfig.from_pretrained(os.path.join(A__ , "best-checkpoint" ) )
snake_case = config.idalabel
snake_case = os.path.join(A__ , "eval_results_best-checkpoint.json" )
snake_case = os.path.join(A__ , "test_results_best-checkpoint.json" )
assert os.path.exists(A__ )
with open(A__ , "r" ) as f:
snake_case = float(json.load(A__ )[args.eval_metric] )
snake_case = os.path.join(A__ , "infer_output_best-checkpoint.csv" )
assert os.path.exists(A__ )
# Loading the dataset from local csv or json files.
snake_case = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
snake_case = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(A__ , exist_ok=A__ )
shutil.copy(A__ , os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(A__ ):
shutil.copy(A__ , os.path.join(A__ , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(A__ , A__ , A__ , A__ , A__ , A__ )
accelerator.wait_for_everyone()
snake_case = os.path.join(A__ , F'train_pseudo.{args.data_file_extension}' )
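        # Early-stopping bookkeeping: keep the iteration with the best eval
        # result, and stop once `early_stopping_patience` iterations pass
        # without an improvement larger than `early_stopping_threshold`.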
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case = eval_result
if best_iteration is None:
snake_case = new_iteration
snake_case = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case = new_iteration
snake_case = new_eval_result
snake_case = 0
else:
if new_eval_result == best_eval_result:
snake_case = new_iteration
snake_case = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , A__ )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]:
_a : list[list[int]] = []
_a : list[int] = []
_a : List[str] = 0
_a : List[str] = sum(lowerCAmelCase_ )
create_state_space_tree(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return result
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> None:
if sum(lowerCAmelCase_ ) > max_sum or (remaining_nums_sum + sum(lowerCAmelCase_ )) < max_sum:
return
if sum(lowerCAmelCase_ ) == max_sum:
result.append(lowerCAmelCase_ )
return
for index in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
create_state_space_tree(
lowerCAmelCase_ , lowerCAmelCase_ , index + 1 , [*path, nums[index]] , lowerCAmelCase_ , remaining_nums_sum - nums[index] , )
__lowerCAmelCase = [3, 34, 4, 12, 5, 2]
__lowerCAmelCase = 9
__lowerCAmelCase = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
_lowerCamelCase : dict[tuple[int, int, int], int] = {}
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCAmelCase : str = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCAmelCase : Dict = _calculate(days - 1 , UpperCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCAmelCase : str = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCAmelCase : Any = _calculate(days - 1 , UpperCAmelCase , 0 )
UpperCAmelCase : Dict = state_late + state_absent + state_ontime
UpperCAmelCase : Any = prizestrings
return prizestrings
def a__ ( UpperCAmelCase : int = 30 ) -> int:
return _calculate(UpperCAmelCase , absent=0 , late=0 )
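# Sanity check (matches the Project Euler 191 statement): of the 3**4 = 81
# possible 4-day attendance strings, exactly 43 earn a prize, so calling the
# solver above with days=4 should return 43.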
if __name__ == "__main__":
print(solution())
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : Any, __A : Optional[int]=1_3, __A : Any=7, __A : Tuple=True, __A : int=True, __A : Dict=True, __A : Union[str, Any]=True, __A : Optional[int]=9_9, __A : Optional[int]=3_2, __A : Union[str, Any]=5, __A : Optional[int]=4, __A : str=3_7, __A : Union[str, Any]="gelu", __A : Optional[int]=0.1, __A : Optional[Any]=0.1, __A : Any=5_1_2, __A : List[str]=1_6, __A : Optional[int]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=False, __A : List[str]=True, __A : int="None", __A : List[str]=3, __A : Any=4, __A : Dict=None, ):
UpperCAmelCase : str = parent
UpperCAmelCase : int = batch_size
UpperCAmelCase : Tuple = seq_length
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : str = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] = max_position_embeddings
UpperCAmelCase : int = type_vocab_size
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Optional[Any] = num_choices
UpperCAmelCase : str = relative_attention
UpperCAmelCase : Any = position_biased_input
UpperCAmelCase : str = pos_att_type
UpperCAmelCase : Union[str, Any] = scope
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : int = None
if self.use_input_mask:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
UpperCAmelCase : Dict = None
if self.use_token_type_ids:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : List[str] = None
UpperCAmelCase : str = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size], self.num_choices )
UpperCAmelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any ):
return DebertaVaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
def __magic_name__ ( self : Dict, __A : str ):
self.parent.assertListEqual(list(result.loss.size() ), [] )
def __magic_name__ ( self : List[str], __A : Dict, __A : int, __A : str, __A : List[str], __A : Dict, __A : str, __A : int ):
UpperCAmelCase : Optional[int] = DebertaVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, attention_mask=__A, token_type_ids=__A )[0]
UpperCAmelCase : Optional[int] = model(__A, token_type_ids=__A )[0]
UpperCAmelCase : int = model(__A )[0]
self.parent.assertListEqual(list(sequence_output.size() ), [self.batch_size, self.seq_length, self.hidden_size] )
def __magic_name__ ( self : Dict, __A : Union[str, Any], __A : Optional[Any], __A : Tuple, __A : Optional[int], __A : List[Any], __A : List[Any], __A : Optional[int] ):
UpperCAmelCase : int = DebertaVaForMaskedLM(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : int = model(__A, attention_mask=__A, token_type_ids=__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str], __A : str, __A : Optional[Any], __A : List[str], __A : Optional[int], __A : List[Any], __A : int, __A : Optional[int] ):
UpperCAmelCase : int = self.num_labels
UpperCAmelCase : Union[str, Any] = DebertaVaForSequenceClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : int = model(__A, attention_mask=__A, token_type_ids=__A, labels=__A )
self.parent.assertListEqual(list(result.logits.size() ), [self.batch_size, self.num_labels] )
self.check_loss_output(__A )
def __magic_name__ ( self : Any, __A : Tuple, __A : Any, __A : str, __A : List[Any], __A : Dict, __A : Optional[Any], __A : List[str] ):
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : int = DebertaVaForTokenClassification(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A, attention_mask=__A, token_type_ids=__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Tuple, __A : List[str], __A : Tuple, __A : Tuple, __A : int, __A : Optional[Any], __A : Tuple, __A : Any ):
UpperCAmelCase : Union[str, Any] = DebertaVaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Any = model(
__A, attention_mask=__A, token_type_ids=__A, start_positions=__A, end_positions=__A, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Dict, __A : Optional[int], __A : str, __A : List[str], __A : Dict, __A : Optional[Any], __A : Union[str, Any], __A : int ):
UpperCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase : int = model(
__A, attention_mask=__A, token_type_ids=__A, labels=__A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def __magic_name__ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : str = DebertaVaModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self, config_class=__A, hidden_size=3_7 )
def __magic_name__ ( self : Any ):
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__A )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__A )
def __magic_name__ ( self : Any ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = DebertaVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def __magic_name__ ( self : str ):
pass
@slow
def __magic_name__ ( self : Optional[int] ):
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'{output[:, 1:4, 1:4]}')
| 99 | 1 |
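# Sketch of the ids_tensor helper the model tester above depends on, reduced
# to plain torch; the real transformers test utility supports more options
# (e.g. an explicit rng), so treat this as illustrative.
import torch


def ids_tensor(shape, vocab_size):
    # uniform random token ids in [0, vocab_size)
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)


# e.g. ids_tensor([13, 7], 99) -> LongTensor of shape (13, 7)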
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of ``input_string`` by writing them into a
    zigzag grid of ``key`` rows and reading the rows left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Recreate the zigzag grid from the ciphertext and read it back in
    zigzag order to recover the plaintext."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Try every possible key and return a dict of key -> decrypted text."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
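# Round-trip sanity check for the rail-fence functions above (editor's sketch;
# the plaintext is the classic textbook rail-fence example).
if __name__ == "__main__":
    plaintext = "WEAREDISCOVEREDFLEEATONCE"
    ciphertext = encrypt(plaintext, 3)
    assert decrypt(ciphertext, 3) == plaintext
    assert bruteforce(ciphertext)[3] == plaintext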
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip('/')  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/') != 1:
        msg = f'{olid} is not a valid Open Library olid'
        raise ValueError(msg)
    return requests.get(f'https://openlibrary.org/{new_olid}.json').json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 259 | 0 |
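# Optional stricter validation than the length/isdigit check above: the
# ISBN-10 checksum (editor's sketch; the function name is illustrative).
def is_valid_isbn10(isbn: str) -> bool:
    if len(isbn) != 10 or not isbn[:9].isdigit():
        return False
    check_char = isbn[9]
    if check_char in "Xx":
        check = 10
    elif check_char.isdigit():
        check = int(check_char)
    else:
        return False
    total = sum((i + 1) * int(d) for i, d in enumerate(isbn[:9])) + 10 * check
    return total % 11 == 0


assert is_valid_isbn10("0140328726")  # the script's default example ISBN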
def selection_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order, in place, and return it."""
    length = len(collection)
    for i in range(length - 1):
        # find the index of the smallest remaining element
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap it into position i
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
| 365 |
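# Usage sketch: selection_sort mutates its argument and returns the same list.
data = [5, 2, 9, 1, 5, 6]
assert selection_sort(data) == [1, 2, 5, 5, 6, 9]
assert data == [1, 2, 5, 5, 6, 9]  # sorted in place
assert selection_sort([]) == []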
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    """Return the value of the -f flag (path of the invoking test file)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    """Load {split}_results.json from output_dir."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
a_ : Optional[Any] = self.get_auto_remove_tmp_dir()
a_ : List[str] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_flax_glue.main()
a_ : str = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
a_ : List[str] = self.get_auto_remove_tmp_dir()
a_ : Union[str, Any] = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_clm_flax.main()
a_ : List[Any] = get_results(SCREAMING_SNAKE_CASE__ )
self.assertLess(result['eval_perplexity'] , 1_0_0 )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
a_ : Tuple = self.get_auto_remove_tmp_dir()
a_ : Dict = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_summarization_flax.main()
a_ : List[str] = get_results(SCREAMING_SNAKE_CASE__ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 1_0 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
a_ : int = self.get_auto_remove_tmp_dir()
a_ : Dict = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_mlm_flax.main()
a_ : List[Any] = get_results(SCREAMING_SNAKE_CASE__ )
self.assertLess(result['eval_perplexity'] , 4_2 )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
a_ : str = self.get_auto_remove_tmp_dir()
a_ : List[str] = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_ta_mlm_flax.main()
a_ : Dict = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
a_ : int = 7 if get_gpu_count() > 1 else 2
a_ : Dict = self.get_auto_remove_tmp_dir()
a_ : Tuple = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_flax_ner.main()
a_ : List[str] = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : List[str] = self.get_auto_remove_tmp_dir()
a_ : int = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_qa.main()
a_ : str = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['eval_f1'] , 3_0 )
self.assertGreaterEqual(result['eval_exact'] , 3_0 )
| 120 | 0 |
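# The core trick the example tests above rely on, in isolation: argparse-based
# scripts read sys.argv, so patching it lets a test drive a script's main()
# with arbitrary CLI flags (editor's sketch; names are illustrative).
import sys
from unittest.mock import patch


def run_with_cli_args(main_fn, argv):
    # argv[0] is conventionally the program name and is ignored by argparse
    with patch.object(sys, "argv", ["prog", *argv]):
        main_fn()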
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class a_ ( a__ ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=64 , _lowerCamelCase=3 , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=16 , _lowerCamelCase=[128, 256, 384] , _lowerCamelCase=[4, 6, 8] , _lowerCamelCase=[2, 3, 4] , _lowerCamelCase=[16, 16, 16] , _lowerCamelCase=0 , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=2 , ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = stride
SCREAMING_SNAKE_CASE : int = padding
SCREAMING_SNAKE_CASE : Optional[int] = hidden_sizes
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = depths
SCREAMING_SNAKE_CASE : int = key_dim
SCREAMING_SNAKE_CASE : Any = drop_path_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Dict = attention_ratio
SCREAMING_SNAKE_CASE : int = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->Tuple:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = LevitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Optional[int] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE : str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = LevitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self ) ->List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : int = False
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : int = LevitModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Any:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : Optional[int] = len(self.model_tester.depths ) + 1
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Tuple = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : int = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : Tuple = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = problem_type['''title''']
SCREAMING_SNAKE_CASE : Optional[Any] = problem_type['''num_labels''']
SCREAMING_SNAKE_CASE : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : List[Any] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
SCREAMING_SNAKE_CASE : Optional[int] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __lowerCAmelCase ( self ) ->Dict:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LevitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    """Load the COCO test fixture image."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) ->List[str]:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
| 313 |
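# The repeated floor((size + 2*padding - kernel) / stride) + 1 arithmetic in
# the tests above is the standard convolution output-size formula; pulled out
# here as a helper for clarity (editor's sketch).
from math import floor


def conv_output_size(size: int, kernel_size: int, stride: int, padding: int) -> int:
    return floor((size + 2 * padding - kernel_size) / stride) + 1


# Levit's patch embedding applies four stride-2 convolutions, e.g.:
# 64 -> conv_output_size(64, 3, 2, 1) == 32 -> 16 -> 8 -> 4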
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ImageGPT model."""

    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ImageGPT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: 'FeatureExtractionMixin', batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional['TensorType'] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 313 | 1 |
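# Serialization round-trip sketch for the config above; save_pretrained /
# from_pretrained / to_dict are the standard PretrainedConfig API.
config = ImageGPTConfig(n_layer=2)          # small override for illustration
config.save_pretrained("./imagegpt-tiny")   # writes ./imagegpt-tiny/config.json
reloaded = ImageGPTConfig.from_pretrained("./imagegpt-tiny")
assert reloaded.to_dict() == config.to_dict()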
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    """Build a YolosConfig for the given checkpoint name."""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights into our YOLOS structure."""
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',"""
""" \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 364 |
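# Toy illustration of the fused-qkv split performed in convert_state_dict and
# read_in_q_k_v above: a (3*dim, dim) projection is cut into equal
# query/key/value thirds (editor's sketch).
import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), qkv)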
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 215 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 193 |
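# Minimal sketch of the Ray remote-actor fan-out used above: spawn a few actor
# replicas and route each request to a random one. The actor class and payload
# here are illustrative, not the real retriever.
import random

import ray


@ray.remote
class EchoWorker:
    def retrieve(self, question):
        return f"docs for {question!r}"


ray.init(ignore_reinit_error=True)
workers = [EchoWorker.remote() for _ in range(2)]
chosen = workers[random.randint(0, len(workers) - 1)]
print(ray.get(chosen.retrieve.remote("what is RAG?")))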
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    @slow
    def test_bert_from_pretrained( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config,BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model,FlaxBertModel )
    @slow
    def test_roberta_from_pretrained( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config,RobertaConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model,FlaxRobertaModel )
    @slow
    def test_bert_jax_jit( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    @slow
    def test_roberta_jax_jit( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError,'''bert-base is not a local folder and is not a valid model identifier''' ):
            model = FlaxAutoModel.from_pretrained('''bert-base''' )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            model = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER,revision='''aaaaaa''' )
    def test_model_file_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError,'''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''',):
            model = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_model_from_pt_suggestion( self ):
        with self.assertRaisesRegex(EnvironmentError,'''Use `from_pt=True` to load this model''' ):
            model = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
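# Note: the jitted `eval` functions above close over `model`; jax retraces once
# per new input structure. A minimal standalone sketch of the same pattern:
#
#   import jax
#   import jax.numpy as jnp
#   @jax.jit
#   def double(x):
#       return x * 2
#   double(jnp.ones((2, 3))).block_until_ready()  # compiled on the first call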
| 193 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """yolos"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
    @property
    def default_onnx_opset( self ) -> int:
        return 12
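# Hypothetical export sketch (model id and variable names are illustrative):
#
#   from transformers import AutoConfig
#   config = AutoConfig.from_pretrained("hustvl/yolos-small")
#   onnx_config = YolosOnnxConfig(config)
#   print(onnx_config.inputs)               # OrderedDict with 'pixel_values' axes
#   print(onnx_config.default_onnx_opset)   # 12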
| 280 |
'''simple docstring'''
def alternative_string_arrange( first_str: str , second_str: str ) -> str:
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    abs_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 280 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_A = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor ):
    def __init__( self, *args, **kwargs ):
        """Deprecated alias kept for backward compatibility."""
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs )
| 278 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs ):
    '''Solves the multi-process interleaved print problem.'''
    with open(__file__ , '''r''' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
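# The flock above serializes prints across processes so per-rank status lines do
# not interleave; the script simply locks its own source file, since every local
# rank shares that path.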
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 226 | 0 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 13_10_72,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
}
def alpha_sigma_to_t( alpha , sigma ):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule( t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
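# Sanity check of the schedule mapping (values rounded):
#   t = 0.5 -> sigma = sin(pi/4)**2 = 0.5, alpha = (1 - 0.25)**0.5 ~ 0.866
#   alpha_sigma_to_t(0.866, 0.5) = atan2(0.5, 0.866) / pi * 2 ~ 0.333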
class Object(object ):
    """Bare attribute container used to mimic the original training args."""
    pass
class DiffusionUncond(nn.Module ):
    """Wrapper matching the module layout of the original dance-diffusion checkpoint."""
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download( model_name ):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"""wget {url} ./""" )
    return f"""./{model_name}.ckpt"""
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming( name ):
    if name.startswith("skip" ):
        return name.replace("skip" , RES_CONV_MAP["skip"] )
    # name has to be of format main.{digit}
    if not name.startswith("main." ):
        raise ValueError(f"""ResConvBlock error with {name}""" )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming( name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f"""Attn error with {name}""" )
def rename( input_string , max_depth=13 ):
    string = input_string
    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )
    depth = 0
    if string.startswith("net.3." ):
        depth += 1
        string = string[6:]
    elif string.startswith("net." ):
        string = string[4:]
    while string.startswith("main.7." ):
        depth += 1
        string = string[7:]
    if string.startswith("main." ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num ) > 3 else "down_blocks.0"
    if not string_left.startswith("." ):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
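# Worked example, traced through the tables above:
#   rename("net.1.main.0.weight") -> "down_blocks.0.resnets.1.conv_1.weight"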
def rename_orig_weights( state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main( args ):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    model_name = args.model_path.split("/" )[-1].split("." )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )["state_dict"] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith("kernel" ) for k in list(diffusers_minus_renamed ) ), f"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 1_00
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("Diff sum" , diff_sum )
    print("Diff max" , diff_max )
    assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
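# Example invocation (script filename and output path are illustrative):
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers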
| 362 |
'''simple docstring'''
def alternative_string_arrange( first_str: str , second_str: str ) -> str:
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 111 | 0 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase ):
    def __init__( self , parent , vocab_size=1_0_0 , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , num_labels=3 , ) -> None:
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp( self ) -> None:
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(outputs )
def prepare_img() -> Image.Image:
    """Loads the standard COCO test image used by the integration tests."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head( self ):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''np''' ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 1_9_6) , dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_9_6, 8_1_9_2)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )
    @slow
    def test_inference_image_classification_head_imagenet_1k( self ):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([-1.23_85, -1.09_87, -1.01_08] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 2_1_8_4_1)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([1.68_81, -0.27_87, 0.59_01] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
| 142 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path ):
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path ):
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file ):
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path ):
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path ):
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
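# Note: `Csv._generate_tables` expects a list of lists of files (one inner list
# per original input), which is why the tests below pass `[[csv_file]]`.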
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog ):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='''Error tokenizing data''' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image ):
    with open(csv_file_with_image , encoding='''utf-8''' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''image''' ).type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label ):
    with open(csv_file_with_label , encoding='''utf-8''' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list ):
    csv = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x: [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 142 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline(Pipeline ):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['prefix'] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ' [None, \'hole\']' )
            preprocess_params['handle_long_generation'] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__( self , text_inputs , **kwargs ):
        return super().__call__(text_inputs , **kwargs )
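    # Hypothetical end-to-end usage sketch (model id is illustrative):
    #   from transformers import pipeline
    #   generator = pipeline('text-generation', model='gpt2')
    #   generator('Hello, I am', max_new_tokens=20, return_full_text=False)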
def _lowerCamelCase ( self : str , a : int , a : Tuple="" , a : Optional[int]=None , **a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.tokenizer(
prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
lowerCAmelCase__ : Optional[int] = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase__ : Optional[int] = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase__ : Dict = generate_kwargs['max_new_tokens']
else:
lowerCAmelCase__ : Tuple = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase__ : str = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
lowerCAmelCase__ : Union[str, Any] = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase__ : str = inputs['attention_mask'][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask' , None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text' )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length' , 0 )
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs['max_length'] = generate_kwargs.get('max_length' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record )
        return records
| 307 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
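# Worked example: to_atuple(224) -> (224, 224); to_atuple((224, 196)) -> (224, 196).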
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model( self , config , text_config ):
        pass
    def prepare_config_and_inputs( self ):
        pass
    def get_pretrained_model_and_inputs( self ):
        pass
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_model( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-5 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
    def test_vision_text_dual_encoder_model( self ):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs )
    def test_model_from_pretrained_configs( self ):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs )
    def test_vision_text_dual_encoder_from_pretrained( self ):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs )
    def test_save_load( self ):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs )
    def test_vision_text_output_attention( self ):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs )
    @slow
    def test_real_model_save_load_from_pretrained( self ):
        model_2 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs )
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_1(**inputs )
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFViTModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFDeiTModel(vision_config , name='vision_model' )
        text_model = TFRobertaModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFCLIPVisionModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference( self ):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=True )
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np' )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1E-3 ) )
| 307 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # map deprecated `no_xxx` flags onto their positive counterparts
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}"""
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        }, )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self) -> Tuple:
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
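# Hedged usage sketch (added; the `models` field comes from the parent
# BenchmarkArguments and is assumed here for illustration):
#
#     args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], eager_mode=True, use_xla=False)
#     with args.strategy.scope():
#         ...  # build and call the benchmarked model under OneDeviceStrategy or TPUStrategy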
| 77 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
"""simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 145 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
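# Hedged usage sketch (added): with the lazy module in place, symbols import as
# usual but the torch-heavy modeling code only loads on first attribute access:
#     from transformers.models.upernet import UperNetForSemanticSegmentation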
| 313 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
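# Illustrative check (added): rename_key moves a tensor to its new name in place.
#     d = {"old": 1}
#     rename_key(d, "old", "new")
#     assert d == {"new": 1}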
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
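# Hedged usage sketch (added; the script filename and output directory are illustrative):
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small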
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313 | 1 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
'''simple docstring'''
_lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
_lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
_lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        attention_weights = ly_weight['''attention''']
_lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder(weights, model):
'''simple docstring'''
_lowerCamelCase : str = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
_lowerCamelCase : List[Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
        attention_weights = ly_weight['''attention''']
_lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder(weights, model):
'''simple docstring'''
_lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
_lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
_lowerCamelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ )
_lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
_lowerCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
_lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight['''self_attention''']
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
_lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_lowerCamelCase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
_lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_lowerCamelCase : int = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
_lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
_lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args):
    '''simple docstring'''
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    gin_file = os.path.join(args.checkpoint_path, '''..''', '''config.gin''')
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''', variance_type='''fixed_large''')
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['''targets_context'''], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['''targets_context'''], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(t5_checkpoint['''target''']['''token_encoder'''], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint['''target''']['''continuous_encoder'''], continuous_encoder)
    decoder = load_decoder(t5_checkpoint['''target''']['''decoder'''], decoder)
    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''')
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
lowerCAmelCase__ = parser.parse_args()
main(args)
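# Hedged usage sketch (added; paths are illustrative):
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path base_with_context/checkpoint_500000 --output_path ./spec-diffusion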
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    """simple docstring"""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
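
# Illustrative check (added): the half-integer branch unrolls the recurrence
# gamma(x) = (x - 1) * gamma(x - 1) down to gamma(0.5) = sqrt(pi).
def demo_half_integer() -> None:
    expected = 1.5 * 0.5 * sqrt(pi)  # gamma(2.5) expanded by hand
    assert abs(gamma(2.5) - expected) < 1e-12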
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(f'''gamma({num}) = {gamma(num)}''')
print('''\nEnter 0 to exit...''')
| 211 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """simple docstring"""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """simple docstring"""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """simple docstring"""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('''graph is not Eulerian''')
        print('''no path''')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('''graph has a Euler path''')
    if check == 1:
        print('''graph has a Euler cycle''')
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """simple docstring"""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
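
# Illustrative check (added): in a triangle every vertex has even degree, so
# check_circuit_or_path reports an Euler cycle (status 1, no odd node found):
#     check_circuit_or_path({1: [2, 3], 2: [1, 3], 3: [1, 2]}, 10) == (1, -1)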
if __name__ == "__main__":
main()
| 211 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 167 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = """xlm-prophetnet"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """num_encoder_attention_heads""",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )

    @property
    def num_hidden_layers(self) -> int:
        '''simple docstring'''
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        '''simple docstring'''
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.' )
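# Hedged usage sketch (added): the combined depth is exposed through the
# read-only property defined above:
#     config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#     assert config.num_hidden_layers == 12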
| 83 | 0 |
"""simple docstring"""
import functools
def min_distance_up_bottom(word1, word2) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1, index2) -> int:
        # if first word index overflows - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2), 1 + min_distance(index1, index2 + 1), diff + min_distance(index1 + 1, index2 + 1), )

    return min_distance(0, 0)
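
# Illustrative check (added): the classic Levenshtein example, five edits turn
# "intention" into "execution":
#     assert min_distance_up_bottom("intention", "execution") == 5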
if __name__ == "__main__":
import doctest
doctest.testmod()
| 166 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    '''simple docstring'''

    def __init__(self, p_stop=0.01, max_length=1000):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    '''simple docstring'''

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
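    # Illustrative note (added): with 2 processes, shard i yields batches
    # i, i+2, i+4, ... of the wrapped sampler; when even_batches=True the short
    # shard is padded by cycling samples from the start, which is what the
    # expected lists in the tests below encode.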
    def test_batch_sampler_shards_with_no_splits(self):
        """simple docstring"""
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
"""simple docstring"""
UpperCAmelCase__ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase__ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase__ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCAmelCase__ : List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
"""simple docstring"""
UpperCAmelCase__ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase__ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCAmelCase__ : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCAmelCase__ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
    def test_batch_sampler_shards_with_splits_no_even(self):
"""simple docstring"""
UpperCAmelCase__ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase__ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase__ : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCAmelCase__ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Tuple = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
    def test_batch_sampler_with_varying_batch_size(self):
        """simple docstring"""
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        """simple docstring"""
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
        self.assertTrue(len(first_list) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        """simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        """simple docstring"""
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        """simple docstring"""
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        """simple docstring"""
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        """simple docstring"""
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        """simple docstring"""
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 166 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    @property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("""DownBlock2D""", """AttnDownBlock2D"""), up_block_types=("""AttnUpBlock2D""", """UpBlock2D"""), )
        return model

    def test_inference(self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="""numpy""").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="""numpy""", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = """google/ncsnpp-celebahq-256"""
__SCREAMING_SNAKE_CASE : Dict = UNetaDModel.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = KarrasVeScheduler()
__SCREAMING_SNAKE_CASE : Tuple = KarrasVePipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=2_0 , generator=lowerCAmelCase__ , output_type="""numpy""" ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 112 |
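# A standalone usage sketch mirroring the slow test above (the checkpoint id
# "google/ncsnpp-celebahq-256" comes from that test; the rest is illustrative):
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler()).to(torch_device)
#   image = pipe(num_inference_steps=20, generator=torch.manual_seed(0), output_type="numpy").images[0]
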
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
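# Usage sketch (illustrative, not part of this module): other modules in the library can
# guard an optional dependency before using it, e.g.
#
#   dep_version_check("tokenizers")  # raises if the installed version violates deps["tokenizers"]
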
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with the `datasets` package and save it as
    {split}.source / {split}.target files under `save_dir`."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
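# Usage sketch via the fire CLI above (the script filename is illustrative):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
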
def solution(limit: int = 1_000_000) -> int:
    """
    Count how many values of n below `limit` have exactly ten solutions of
    x^2 - y^2 - z^2 = n, where x, y, z are consecutive terms of a decreasing
    arithmetic progression of positive integers (Project Euler problem 135).
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 needs y > d, and n > 0 needs y < 4d

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count


if __name__ == "__main__":
    print(f"{solution() = }")
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch the current page of top BBC News articles; each article in the list is a dict
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    """An `ArgumentParser` subclass that uses type hints on dataclasses to generate command-line arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """Parse command-line args (plus optional `.args` files) into instances of the dataclass types."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Instantiate the dataclass types from a plain dict instead of command-line args."""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
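# A minimal usage sketch (the dataclass below is illustrative, not part of the module):
#
#   @dataclasses.dataclass
#   class TrainingConfig:
#       learning_rate: float = 3e-4
#       use_fp16: bool = False
#
#   parser = HfArgumentParser(TrainingConfig)
#   (config,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-3", "--use_fp16"])
#   # config.learning_rate == 1e-3 and config.use_fp16 is True; a `--no_<field>`
#   # complement flag is generated automatically for booleans that default to True.
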
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(
_SCREAMING_SNAKE_CASE , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class _snake_case ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[Any] ) -> np.ndarray:
if self.framework == "tf":
UpperCAmelCase_ : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase_ : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=lowerCamelCase_ )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[int] ) -> np.ndarray:
UpperCAmelCase_ : List[Any] = self.get_masked_index(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" ,self.model.base_model_prefix ,F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> str:
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCamelCase_ )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Any=None ,**lowerCamelCase_: List[str] ) -> Dict[str, GenericTensor]:
if return_tensors is None:
UpperCAmelCase_ : List[Any] = self.framework
UpperCAmelCase_ : Optional[Any] = self.tokenizer(lowerCamelCase_ ,return_tensors=lowerCamelCase_ )
self.ensure_exactly_one_mask_token(lowerCamelCase_ )
return model_inputs
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : int = self.model(**lowerCamelCase_ )
UpperCAmelCase_ : Dict = model_inputs["""input_ids"""]
return model_outputs
def A__ ( self: Optional[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Dict=5 ,lowerCamelCase_: Optional[int]=None ) -> Dict:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ : int = target_ids.shape[0]
UpperCAmelCase_ : Tuple = model_outputs["""input_ids"""][0]
UpperCAmelCase_ : List[Any] = model_outputs["""logits"""]
if self.framework == "tf":
UpperCAmelCase_ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase_ : Optional[int] = outputs.numpy()
UpperCAmelCase_ : str = outputs[0, masked_index, :]
UpperCAmelCase_ : List[Any] = stable_softmax(lowerCamelCase_ ,axis=-1 )
if target_ids is not None:
UpperCAmelCase_ : Any = tf.gather_nd(tf.squeeze(lowerCamelCase_ ,0 ) ,target_ids.reshape(-1 ,1 ) )
UpperCAmelCase_ : Optional[Any] = tf.expand_dims(lowerCamelCase_ ,0 )
UpperCAmelCase_ : Tuple = tf.math.top_k(lowerCamelCase_ ,k=lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=lowerCamelCase_ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ : int = outputs[0, masked_index, :]
UpperCAmelCase_ : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase_ : Dict = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = probs.topk(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Tuple = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ):
UpperCAmelCase_ : List[Any] = []
for v, p in zip(_values ,_predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ : List[Any] = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ : Optional[int] = target_ids[p].tolist()
UpperCAmelCase_ : Optional[Any] = p
# Filter padding out:
UpperCAmelCase_ : Optional[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ : Tuple = self.tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(lowerCamelCase_ )
result.append(lowerCamelCase_ )
if single_mask:
return result[0]
return result
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int=None ) -> int:
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = [targets]
try:
UpperCAmelCase_ : Any = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : str = []
for target in targets:
UpperCAmelCase_ : str = vocab.get(lowerCamelCase_ ,lowerCamelCase_ )
if id_ is None:
UpperCAmelCase_ : Dict = self.tokenizer(
lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,max_length=1 ,truncation=lowerCamelCase_ ,)["""input_ids"""]
if len(lowerCamelCase_ ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
UpperCAmelCase_ : List[str] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
UpperCAmelCase_ : Optional[Any] = list(set(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
UpperCAmelCase_ : Tuple = np.array(lowerCamelCase_ )
return target_ids
def A__ ( self: Optional[int] ,lowerCamelCase_: Optional[int]=None ,lowerCamelCase_: List[str]=None ) -> Optional[Any]:
UpperCAmelCase_ : str = {}
if targets is not None:
UpperCAmelCase_ : Tuple = self.get_target_ids(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = target_ids
if top_k is not None:
UpperCAmelCase_ : Optional[int] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" ,self.model.base_model_prefix ,"""The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self: List[Any] ,lowerCamelCase_: List[str] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = super().__call__(lowerCamelCase_ ,**lowerCamelCase_ )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and len(lowerCamelCase_ ) == 1:
return outputs[0]
return outputs
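# A minimal usage sketch through the public factory (the `pipeline` import and the
# "bert-base-uncased" checkpoint are assumptions outside this file):
#
#   from transformers import pipeline
#
#   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
#   fill_mask("Paris is the [MASK] of France.", top_k=3)
#   fill_mask("Paris is the [MASK] of France.", targets=["capital", "city"])
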
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum through the matrix in `filename`, moving only up, down, and right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")