"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        # The model should also accept a plain list of [input_ids, attention_mask]
        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb_output = emb(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb_output, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb([2, 16, 512])
        weights = emb.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
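
# To run only this module with pytest (invocation shown for illustration; the
# test-file path assumes the standard transformers test layout implied by the
# relative imports above):
#   python -m pytest tests/models/roformer/test_modeling_tf_roformer.py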
"""simple docstring"""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        # read the image in grayscale and keep a copy for display
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        # build the cumulative intensity lookup table
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        # remap every pixel through the lookup table
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
"""simple docstring"""
def rank_of_matrix(matrix):
    """
    Finds the rank of a matrix by Gaussian elimination.

    >>> rank_of_matrix([[1, 2, 3], [4, 5, 6], [5, 7, 9]])
    2
    """
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
    import doctest

    doctest.testmod()
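
    # A small sanity check (matrix chosen here for illustration, not from the
    # original file): the third row equals the sum of the first two, so the
    # rank should drop to 2.
    print(rank_of_matrix([[1, 2, 3], [4, 5, 6], [5, 7, 9]]))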
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits into a {0, 1} mask before scaling to 8-bit
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
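
# A minimal usage sketch (the call pattern and the file name "cat.png" are
# illustrative assumptions, not part of this module):
#
#     from PIL import Image
#     tool = ImageSegmentationTool()
#     tool.setup()
#     mask = tool(image=Image.open("cat.png"), label="cat")
#     mask.save("cat_mask.png")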
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
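
# How the mapping is applied (illustrative walk-through): a fairseq key such as
# "encoder.layers.3.self_attn.linear_q.weight" matches the MAPPING entry
# "self_attn.linear_q", the "*" wildcard is replaced with the layer index
# parsed from the key, and the tensor is copied to
# "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q" with weight_type
# "weight". Keys in TOP_LEVEL_KEYS skip the "wav2vec2_conformer." prefix.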
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
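
# Example invocation (all paths are placeholders):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt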
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol.
    Current minimum recommendation is a 2048-bit key (group 14).
    """

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
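
# A minimal end-to-end sketch of the exchange (party names and the helper
# below are illustrative additions, not part of the original module): both
# sides must derive the same shared secret from each other's public keys.
def _example_exchange() -> bool:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    # Each side hashes (other side's public key) ** (own private key) mod p.
    return alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )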
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
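
# Behavior note (descriptive, inherited from `_LazyModule`): at runtime the
# module object is swapped for a lazy proxy, so names such as
# `GPTNeoXJapaneseModel` are only imported (and torch loaded) on first
# attribute access.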
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = weights[0][0][0]
lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
lowerCAmelCase__ = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
# intermediate weighs
lowerCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
lowerCAmelCase__ = intermediate_weights[2]
# layernorm 2
lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings.weights[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise the PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
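# Illustration (a sketch, not part of the original script): the transpose/view
# pattern used by the set_layer_* helpers above re-packs per-head Trax
# attention weights into the (out_features, in_features) layout that
# torch.nn.Linear expects. The (num_heads, d_model, d_head) source layout is
# an assumption made for this demo only.
def _demo_weight_repacking():
    num_heads, d_model, d_head = 2, 8, 4
    trax_weight = torch.randn(num_heads, d_model, d_head)  # hypothetical per-head weight
    torch_weight = trax_weight.transpose(1, 2).contiguous().view(-1, d_model)
    assert torch_weight.shape == (num_heads * d_head, d_model)
    return torch_weight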
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
from ....utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class a_ ( __UpperCamelCase ):
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
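# Illustration (sketch): `self.__dict__ = config.__dict__` makes the wrapper and
# the wrapped config share one attribute dict, so fields written through the
# wrapper appear on the original object too. `_PlainConfig` is a stand-in class
# invented for this demo.
class _PlainConfig:
    def __init__(self):
        self.hidden_size = 768

def _demo_shared_attribute_dict():
    base = _PlainConfig()
    wrapper = a_(base, num_labels=3)  # `a_` is the wrapper class defined above
    assert base.num_labels == 3 and base.modal_hidden_size == 2048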
| 674 | """simple docstring"""
import os
from math import log10

def solution(data_file="base_exp.txt"):
    """simple docstring"""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result

if __name__ == "__main__":
    print(solution())
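# Illustration (sketch): why the loop compares x * log10(a) instead of a**x.
# Since log10(a**x) == x * log10(a), exponent comparisons reduce to cheap float
# arithmetic instead of materializing numbers with hundreds of digits.
def _demo_log_comparison():
    # 2**1000 (~301 digits) versus 3**600 (~286 digits)
    assert 1000 * log10(2) > 600 * log10(3)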
| 674 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase : Union[str, Any] = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__lowerCAmelCase : Optional[Any] = {
"yjernite/retribert-base-uncased": 5_12,
}
__lowerCAmelCase : List[str] = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : List[str] = RetriBertTokenizer
UpperCamelCase_ : List[str] = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase" , do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
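# Illustration (sketch): the segment-id layout built by the token-type method
# above, spelled out with plain lists; the ids 101/102 are hypothetical
# placeholders for [CLS]/[SEP].
def _demo_token_type_ids():
    cls, sep = [101], [102]
    token_ids_a, token_ids_b = [7, 8, 9], [4, 5]
    segment_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    assert segment_ids == [0, 0, 0, 0, 0, 1, 1, 1]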
| 674 | """simple docstring"""
def euclidean_gcd(a, b):
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a

def euclidean_gcd_recursive(a, b):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)

def main():
    """simple docstring"""
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""")
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""")
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""")
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""")
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""")
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""")
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""")
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""")
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""")
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""")

if __name__ == "__main__":
    main()
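# Sanity check (sketch): both implementations above agree with math.gcd.
def _demo_gcd_agreement():
    import math
    for a, b in [(3, 5), (54, 24), (0, 7)]:
        assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)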
| 674 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
"""simple docstring"""
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict):
    """simple docstring"""
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
return upgrade
@torch.no_grad()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
if config_path is not None:
lowerCAmelCase__ = FlavaConfig.from_pretrained(lowerCamelCase__ )
else:
lowerCAmelCase__ = FlavaConfig()
lowerCAmelCase__ = FlavaForPreTraining(lowerCamelCase__ ).eval()
lowerCAmelCase__ = convert_dalle_checkpoint(lowerCamelCase__ , lowerCamelCase__ , save_checkpoint=lowerCamelCase__ )
if os.path.exists(lowerCamelCase__ ):
lowerCAmelCase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
else:
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" )
lowerCAmelCase__ = upgrade_state_dict(lowerCamelCase__ , lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
lowerCAmelCase__ = hf_model.state_dict()
lowerCAmelCase__ = count_parameters(lowerCamelCase__ )
lowerCAmelCase__ = count_parameters(lowerCamelCase__ ) + count_parameters(lowerCamelCase__ )
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
hf_model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
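# Illustration (sketch): the renaming in upgrade_state_dict is just a chain of
# substring rewrites applied to every checkpoint key; the key below is made up
# for this demo.
def _demo_key_rewrite():
    key = "text_encoder.module.encoder.layer.0.weight"
    key = key.replace("text_encoder.module", "flava.text_model")
    assert key == "flava.text_model.encoder.layer.0.weight"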
| 674 | """simple docstring"""
import os

def solution():
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_filepath = os.path.join(script_directory, "triangle.txt")
    with open(triangle_filepath) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_1, number_2)
    return max(a[-1])

if __name__ == "__main__":
    print(solution())
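# Sanity check (sketch): the same accumulation on Project Euler's 4-row example
# triangle, whose best path (3 -> 7 -> 4 -> 9) sums to 23.
def _demo_small_triangle():
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_1, number_2)
    assert max(a[-1]) == 23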
| 674 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise the PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
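# Usage sketch: calling the converter directly instead of via argparse. All
# three paths are hypothetical placeholders, not real files.
def _demo_programmatic_conversion():
    convert_tf_checkpoint_to_pytorch(
        "/tmp/bert_model.ckpt", "/tmp/bert_config.json", "/tmp/pytorch_model.bin"
    )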
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__lowerCAmelCase : Optional[int] = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
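# Sketch of a plausible implementation of the `calculate_bleu` helper imported
# from `utils` above; this sacrebleu-based version is an assumption, not the
# actual helper.
def _calculate_bleu_sketch(output_lns, refs_lns):
    import sacrebleu
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}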
| 674 | 1 |
"""simple docstring"""
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
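# Usage sketch (assumes this __init__ sits inside the `datasets` package, as
# the relative imports suggest): declaring a dataset schema with the exported
# feature types.
def _demo_features_schema():
    return Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})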
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def quote_of_the_day():
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + "/today").json()

def random_quotes():
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + "/random").json()
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = random_quotes()
pprint.pprint(response)
| 674 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase__ = """fp16"""
self.assertTrue(is_safetensors_compatible(snake_case__ , variant=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase__ = """fp16"""
self.assertTrue(is_safetensors_compatible(snake_case__ , variant=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# pass variant but use the non-variant filenames
lowerCAmelCase__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
lowerCAmelCase__ = """fp16"""
self.assertTrue(is_safetensors_compatible(snake_case__ , variant=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCAmelCase__ = """fp16"""
self.assertFalse(is_safetensors_compatible(snake_case__ , variant=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
lowerCAmelCase__ = """fp16"""
self.assertTrue(is_safetensors_compatible(snake_case__ , variant=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# pass variant but use the non-variant filenames
lowerCAmelCase__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
lowerCAmelCase__ = """fp16"""
self.assertTrue(is_safetensors_compatible(snake_case__ , variant=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase__ = """fp16"""
self.assertFalse(is_safetensors_compatible(snake_case__ , variant=snake_case__ ) )
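# Illustration (sketch): the rule these tests exercise, in miniature -- every
# torch ".bin" weight needs a ".safetensors" sibling for a repo to count as
# safetensors-compatible.
def _demo_compatibility_rule():
    filenames = [
        "unet/diffusion_pytorch_model.bin",
        "unet/diffusion_pytorch_model.safetensors",
    ]
    bins = {f[: -len(".bin")] for f in filenames if f.endswith(".bin")}
    safes = {f[: -len(".safetensors")] for f in filenames if f.endswith(".safetensors")}
    assert bins.issubset(safes)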
| 674 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
# Create a dummy config file with image_processor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 674 | 1 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : List[str] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowerCAmelCase : Any = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowerCAmelCase : Union[str, Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
    def _compute( self , predictions : List[List[str]] , references : List[List[List[str]]] , min_len : int = 1 , max_len : int = 4 , ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
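# Illustration (sketch): the direct nltk call that _compute wraps, on a toy
# hypothesis/reference pair.
def _demo_corpus_gleu():
    hypothesis = ["the", "cat", "sat"]
    reference = ["the", "cat", "sat", "down"]
    return gleu_score.corpus_gleu(list_of_references=[[reference]], hypotheses=[hypothesis])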
| 674 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.float32)
def __len__( self : Optional[Any] ):
return self.length
def __getitem__( self : List[str] , snake_case__ : Optional[int] ):
return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward( self , x=None ):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward( self , x=None ):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length")
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
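# Usage sketch: wiring the helper above into an `accelerate` script; the
# Accelerator instance supplies the distributed_type consulted by collate_fn.
def _demo_dataloader_setup():
    from accelerate import Accelerator

    accelerator = Accelerator()
    return get_dataloaders(accelerator, batch_size=16)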
| 674 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[Any] = "philschmid/bart-large-cnn-samsum"
UpperCamelCase_ : List[Any] = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
UpperCamelCase_ : int = "summarizer"
UpperCamelCase_ : Any = AutoTokenizer
    UpperCamelCase_ : Optional[Any] = AutoModelForSeq2SeqLM
UpperCamelCase_ : Optional[Any] = ["text"]
UpperCamelCase_ : Dict = ["text"]
    def encode( self , text ):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward( self , inputs ):
        return self.model.generate(**inputs)[0]

    def decode( self , outputs ):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
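# Usage sketch for the summarization tool above (the class is named `a_` in
# this dump); instantiating it downloads the philschmid/bart-large-cnn-samsum
# checkpoint, and the input string is a placeholder.
def _demo_summarizer_tool():
    tool = a_()
    return tool("Long meeting transcript goes here ...")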
| 674 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """simple docstring"""
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class a_ :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case__ )
lowerCAmelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case__ )
lowerCAmelCase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case__ ) > 0.5
).float()
lowerCAmelCase__ = (torch.rand((self.batch_size, self.num_labels) , device=snake_case__ ) > 0.5).long()
lowerCAmelCase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : int ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Any ):
lowerCAmelCase__ = output.encoder_hidden_states
lowerCAmelCase__ = output.pixel_decoder_hidden_states
lowerCAmelCase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case__ ) , config.decoder_config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Dict=False ):
with torch.no_grad():
lowerCAmelCase__ = MaskFormerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(pixel_values=snake_case__ , pixel_mask=snake_case__ )
lowerCAmelCase__ = model(snake_case__ , output_hidden_states=snake_case__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = MaskFormerForInstanceSegmentation(config=snake_case__ )
model.to(snake_case__ )
model.eval()
        def comm_check_on_output(result):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase__ = model(pixel_values=snake_case__ , pixel_mask=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
comm_check_on_output(snake_case__ )
lowerCAmelCase__ = model(
pixel_values=snake_case__ , pixel_mask=snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ )
comm_check_on_output(snake_case__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
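# Illustration (sketch): the //4 spatial compression asserted above, with this
# tester's default sizes.
def _demo_expected_mask_shape():
    batch_size, num_queries, min_size, max_size = 2, 10, 32 * 4, 32 * 6
    return (batch_size, num_queries, min_size // 4, max_size // 4)  # (2, 10, 32, 48)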
@require_torch
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : Dict = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = MaskFormerModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case__ , **snake_case__ , output_hidden_states=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case__ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def _SCREAMING_SNAKE_CASE ( self : str ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase__ = MaskFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = (self.model_tester.min_size,) * 2
lowerCAmelCase__ = {
"""pixel_values""": torch.randn((2, 3, *size) , device=snake_case__ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=snake_case__ ),
"""class_labels""": torch.zeros(2 , 10 , device=snake_case__ ).long(),
}
lowerCAmelCase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case__ )
lowerCAmelCase__ = model(**snake_case__ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case__ , **snake_case__ , output_hidden_states=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ ).to(snake_case__ )
lowerCAmelCase__ = model(**snake_case__ , output_attentions=snake_case__ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase__ = model(snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase__ = model(snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ )
lowerCAmelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCAmelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
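# A minimal sketch of the ``retain_grad`` technique exercised above (added for
# clarity; plain PyTorch, independent of MaskFormer -- the helper name is
# hypothetical and only illustrative):
def _retain_grad_sketch():
    x = torch.randn(2, 3, requires_grad=True)
    h = x * 2  # an intermediate (non-leaf) tensor
    h.retain_grad()  # keep .grad on a non-leaf tensor after backward()
    h.sum().backward()
    assert h.grad is not None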
__lowerCAmelCase : List[str] = 1e-4
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(snake_case__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(snake_case__ , return_tensors="""pt""" ).to(snake_case__ )
lowerCAmelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case__ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ = model(**snake_case__ )
lowerCAmelCase__ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
lowerCAmelCase__ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
lowerCAmelCase__ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(snake_case__ )
.eval()
)
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(snake_case__ , return_tensors="""pt""" ).to(snake_case__ )
lowerCAmelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case__ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ = model(**snake_case__ )
# masks_queries_logits
lowerCAmelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase__ = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
lowerCAmelCase__ = torch.tensor(snake_case__ ).to(snake_case__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
# class_queries_logits
lowerCAmelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(snake_case__ )
.eval()
)
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(snake_case__ , return_tensors="""pt""" ).to(snake_case__ )
lowerCAmelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case__ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ = model(**snake_case__ )
# masks_queries_logits
lowerCAmelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
lowerCAmelCase__ = torch.tensor(snake_case__ ).to(snake_case__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
# class_queries_logits
lowerCAmelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(snake_case__ )
.eval()
)
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowerCAmelCase__ = inputs["""pixel_values"""].to(snake_case__ )
lowerCAmelCase__ = [el.to(snake_case__ ) for el in inputs["""mask_labels"""]]
lowerCAmelCase__ = [el.to(snake_case__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCAmelCase__ = model(**snake_case__ )
self.assertTrue(outputs.loss is not None )
| 674 | """simple docstring"""
def sylvester ( lowerCamelCase__ ):
    """simple docstring"""
    assert isinstance(lowerCamelCase__ , int ), f"""The input value of [n={lowerCamelCase__}] is not an integer"""
    if lowerCamelCase__ == 1:
        return 2
    elif lowerCamelCase__ < 1:
        lowerCAmelCase__ = f"""The input value of [n={lowerCamelCase__}] has to be > 0"""
        raise ValueError(lowerCAmelCase__ )
    else:
        num = sylvester(lowerCamelCase__ - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
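# Illustrative sanity check (added; not in the original file). Each term is one
# more than the product of all previous terms: 2, 3, 7, 43, 1807, ...
assert [sylvester(i ) for i in range(1 , 6 )] == [2, 3, 7, 43, 1807]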
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation ( lowerCamelCase__ ):
    """simple docstring"""
    cells = lowerCamelCase__
    next_generation = []
for i in range(len(lowerCamelCase__ ) ):
        next_generation_row = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row )
return next_generation
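# Illustrative check (added; not in the original file): a blinker oscillates
# with period 2, so two generations return it to its initial state.
assert new_generation(new_generation(BLINKER ) ) == BLINKER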
def generate_images ( lowerCamelCase__ , frames ):
    """simple docstring"""
    cells = lowerCamelCase__
    images = []
    for _ in range(frames ):
# Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
# Save image
        images.append(img )
        cells = new_generation(cells )
return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 674 | """simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = PegasusTokenizer
UpperCamelCase_ : Any = PegasusTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(snake_case__ ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = PegasusTokenizer
UpperCamelCase_ : Optional[int] = PegasusTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 674 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class a_ ( unittest.TestCase ):
def __init__( self : Tuple , snake_case__ : List[Any] , snake_case__ : List[Any]=13 , snake_case__ : List[Any]=7 , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : str=True , snake_case__ : Any=True , snake_case__ : List[Any]=99 , snake_case__ : Any=32 , snake_case__ : Dict=5 , snake_case__ : List[str]=4 , snake_case__ : Tuple=37 , snake_case__ : Dict="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Any=0.1 , snake_case__ : Optional[Any]=512 , snake_case__ : Union[str, Any]=16 , snake_case__ : Optional[Any]=2 , snake_case__ : int=0.02 , snake_case__ : str=4 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_attention_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_choices
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_attention_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = True
lowerCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = True
UpperCamelCase_ : int = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = FlaxBertModelTester(self )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
        # Only check this for the base model; it is not necessary for all model classes.
        # This will also help speed up the tests.
lowerCAmelCase__ = FlaxBertModel.from_pretrained("""bert-base-cased""" )
lowerCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
| 674 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , )
assert hasattr(self , """env""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump test results into a json file to share in the PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Union[str, Any] = XLNetTokenizer
UpperCamelCase_ : Optional[int] = XLNetTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Union[str, Any] = True
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XLNetTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = """<s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(snake_case__ ) , 1006 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = XLNetTokenizer(snake_case__ , keep_accents=snake_case__ )
lowerCAmelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(snake_case__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [285, 46, 10, 170, 382] )
lowerCAmelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(snake_case__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = XLNetTokenizer(snake_case__ , do_lower_case=snake_case__ )
lowerCAmelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = XLNetTokenizer(snake_case__ , do_lower_case=snake_case__ )
lowerCAmelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
lowerCAmelCase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(snake_case__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 674 | """simple docstring"""
from math import pi, sqrt
def gamma ( num ):
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
    if num > 171.5:
raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _UpperCAmelCase ( ):
"""simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
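# Additional illustrative checks (added; not in the original file):
# gamma(n) == (n - 1)! for positive integers, and gamma(1.5) == 0.5 * sqrt(pi).
assert gamma(5 ) == 24.0
assert gamma(1.5 ) == 0.5 * sqrt(pi )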
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
while num:
        num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 674 | 1 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[Any] = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
__lowerCAmelCase : List[str] = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
__lowerCAmelCase : Tuple = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds , labels ):
"""simple docstring"""
return float((preds == labels).mean() )
def acc_and_fa ( preds , labels , fa_avg="binary" ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
return {
"accuracy": acc,
"f1": fa,
}
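# Illustrative check (added; not in the original file): perfect agreement gives
# accuracy == f1 == 1.0.
import numpy as np

assert acc_and_fa(np.array([0, 1, 1] ), np.array([0, 1, 1] ) ) == {"accuracy": 1.0, "f1": 1.0}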
def evaluate_multirc ( ids_preds , labels ):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = fa_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(fa_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
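# Illustrative check (added; not in the original file): two correctly predicted
# answers to the same question give a perfect exact-match score.
assert evaluate_multirc(
    [
        {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
        {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
    ],
    [0, 1],
)["exact_match"] == 1.0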
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE ( self : Any ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : int , snake_case__ : Union[str, Any] ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(snake_case__ , snake_case__ )}
elif self.config_name == "cb":
return acc_and_fa(snake_case__ , snake_case__ , fa_avg="""macro""" )
elif self.config_name == "record":
lowerCAmelCase__ = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCAmelCase__ = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(snake_case__ , snake_case__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(snake_case__ , snake_case__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 674 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 2
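        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so the sequence length is 227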
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
lowerCAmelCase__ = TFDeiTModel(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ):
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ):
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Any = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TFDeiTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ):
lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 674 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : UNetaDModel
UpperCamelCase_ : ScoreSdeVeScheduler
def __init__( self : Optional[Any] , snake_case__ : UNetaDModel , snake_case__ : ScoreSdeVeScheduler ):
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self : Dict , snake_case__ : int = 1 , snake_case__ : int = 2000 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , **snake_case__ : int , ):
lowerCAmelCase__ = self.unet.config.sample_size
lowerCAmelCase__ = (batch_size, 3, img_size, img_size)
lowerCAmelCase__ = self.unet
lowerCAmelCase__ = randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCAmelCase__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCAmelCase__ = self.unet(snake_case__ , snake_case__ ).sample
lowerCAmelCase__ = self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowerCAmelCase__ = model(snake_case__ , snake_case__ ).sample
lowerCAmelCase__ = self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = output.prev_sample, output.prev_sample_mean
lowerCAmelCase__ = sample_mean.clamp(0 , 1 )
lowerCAmelCase__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
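# Example usage (a hedged sketch, not part of the original file; the checkpoint
# name is illustrative -- any compatible UNet2DModel/ScoreSdeVeScheduler pair
# should work, and the keyword below assumes the upstream parameter name
# ``num_inference_steps``):
#
#   pipe = a_.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]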
| 674 | """simple docstring"""
from __future__ import annotations
from math import gcd
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
return (pow(lowerCamelCase__ , 2 ) + step) % modulus
for _ in range(lowerCamelCase__ ):
# These track the position within the cycle detection logic.
lowerCAmelCase__ = seed
lowerCAmelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
lowerCAmelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
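# Quick sanity check, separate from the CLI below. Upstream the function
# above is exposed as ``pollard_rho`` (the name the CLI already uses);
# 8051 = 83 * 97 is the classic demonstration input for this algorithm:
#
#     factor = pollard_rho(8051)
#     assert factor in (83, 97)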
if __name__ == "__main__":
import argparse
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
__lowerCAmelCase : List[str] = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | 1 |
"""simple docstring"""
from typing import Any
import numpy as np
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return np.array_equal(lowerCamelCase__ , matrix.conjugate().T )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = v.conjugate().T
lowerCAmelCase__ = v_star.dot(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , np.ndarray )
return (v_star_dot.dot(lowerCamelCase__ )) / (v_star.dot(lowerCamelCase__ ))
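# For a Hermitian matrix M, the Rayleigh quotient v* M v / (v* v) is always a
# real number and lies between the smallest and largest eigenvalues of M; the
# test routine below relies on both properties.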
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
lowerCAmelCase__ = np.array([[1], [2], [3]] )
assert is_hermitian(lowerCamelCase__ ), f"""{a} is not hermitian."""
print(rayleigh_quotient(lowerCamelCase__ , lowerCamelCase__ ) )
lowerCAmelCase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowerCamelCase__ ), f"""{a} is not hermitian."""
assert rayleigh_quotient(lowerCamelCase__ , lowerCamelCase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 674 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ )
# set absolute/relative position embeddings parameter
lowerCAmelCase__ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = True
# hparam_utils.py hparams
        lowerCAmelCase__ = 0.664694
        lowerCAmelCase__ = 0.207951
        lowerCAmelCase__ = 0.121194
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = False
        lowerCAmelCase__ = 0.0352513
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = False
# hparam_utils.py hparams
        lowerCAmelCase__ = 36.4519
        lowerCAmelCase__ = 0.903421
        lowerCAmelCase__ = 222.088
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = 0.763141
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "TABFACT":
lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ )
elif task == "MLM":
lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(lowerCamelCase__ )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
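    # Example invocation (paths are placeholders; the script name assumes the
    # upstream file convert_tapas_original_tf_checkpoint_to_pytorch.py):
    #
    #     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
    #         --task WTQ --reset_position_index_per_cell \
    #         --tf_checkpoint_path model.ckpt \
    #         --tapas_config_file tapas_config.json \
    #         --pytorch_dump_path ./tapas-wtq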
| 674 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__lowerCAmelCase : Tuple = object()
# For specifying empty leaf dict `{}`
__lowerCAmelCase : int = object()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(lowerCamelCase__ ) - len(lowerCamelCase__ ) + 1 ):
lowerCAmelCase__ = [x.match(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , ks[i:] )]
if matches and all(lowerCamelCase__ ):
return True
return False
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
def replace(lowerCamelCase__ , lowerCamelCase__ ):
for rule, replacement in rules:
if _match(lowerCamelCase__ , lowerCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ( ):
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , lowerCamelCase__ )),
(("transformer", "wte", "embedding"), P("""mp""" , lowerCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCamelCase__ , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , lowerCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCamelCase__ , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , lowerCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = _get_partition_rules()
lowerCAmelCase__ = _replacement_rules(lowerCamelCase__ )
lowerCAmelCase__ = {k: _unmatched for k in flatten_dict(lowerCamelCase__ )}
lowerCAmelCase__ = {k: replace(lowerCamelCase__ , lowerCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowerCamelCase__ ) )
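# Usage sketch (assumptions: upstream this last helper is named
# ``set_partitions``, and "mp" matches a model-parallel axis name in the
# active jax mesh):
#
#     param_specs = set_partitions(flax_model.params)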
| 674 | """simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ = 50 ):
"""simple docstring"""
lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
class a_ :
def __init__( self : str , snake_case__ : List[str]=None ):
lowerCAmelCase__ = data
lowerCAmelCase__ = None
def __repr__( self : List[Any] ):
lowerCAmelCase__ = []
lowerCAmelCase__ = self
while temp:
string_rep.append(F"""{temp.data}""" )
lowerCAmelCase__ = temp.next
return "->".join(snake_case__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if not elements_list:
raise Exception("""The Elements List is empty""" )
lowerCAmelCase__ = lowerCAmelCase__ = Node(elements_list[0] )
for i in range(1 , len(lowerCamelCase__ ) ):
lowerCAmelCase__ = Node(elements_list[i] )
lowerCAmelCase__ = current.next
return head
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if head_node is not None and isinstance(lowerCamelCase__ , lowerCamelCase__ ):
print_reverse(head_node.next )
print(head_node.data )
def _UpperCAmelCase ( ):
"""simple docstring"""
from doctest import testmod
testmod()
lowerCAmelCase__ = make_linked_list([14, 52, 14, 12, 43] )
print("""Linked List:""" )
print(lowerCamelCase__ )
print("""Elements in Reverse:""" )
print_reverse(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 674 | """simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowerCAmelCase__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase__ = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
lowerCAmelCase__ = nlp.model.BERTModel(
lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , )
original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
lowerCAmelCase__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase__ = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ )
lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNet arrays to PyTorch
def to_torch(lowerCamelCase__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = hf_param.shape
lowerCAmelCase__ = to_torch(params[gluon_param] )
lowerCAmelCase__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase__ = layer.attention.self
lowerCAmelCase__ = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowerCAmelCase__ = layer.attention.output
lowerCAmelCase__ = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowerCAmelCase__ = layer.intermediate
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowerCAmelCase__ = layer.output
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""]
# Get gluon output
lowerCAmelCase__ = mx.nd.array([input_ids] )
lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase__ )
lowerCAmelCase__ = BertModel.from_pretrained(lowerCamelCase__ )
hf_bort_model.eval()
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0]
lowerCAmelCase__ = output_gluon[0].asnumpy()
lowerCAmelCase__ = output_hf[0].detach().numpy()
lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : str = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 674 | 1 |
"""simple docstring"""
import os
__lowerCAmelCase : Any = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
while index < len(lowerCamelCase__ ) - 1:
lowerCAmelCase__ = SYMBOLS[numerals[index]]
lowerCAmelCase__ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCAmelCase__ = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCAmelCase__ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
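# Round-trip check (upstream these helpers are ``parse_roman_numerals`` and
# ``generate_roman_numerals``, the names the solution routine below uses):
#
#     parse_roman_numerals("MCMXCIV")  # -> 1994
#     generate_roman_numerals(1994)    # -> "MCMXCIV", the minimal form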
def _UpperCAmelCase ( lowerCamelCase__ = "/p089_roman.txt" ):
"""simple docstring"""
lowerCAmelCase__ = 0
with open(os.path.dirname(lowerCamelCase__ ) + roman_numerals_filename ) as filea:
lowerCAmelCase__ = filea.readlines()
for line in lines:
lowerCAmelCase__ = line.strip()
lowerCAmelCase__ = parse_roman_numerals(lowerCamelCase__ )
lowerCAmelCase__ = generate_roman_numerals(lowerCamelCase__ )
savings += len(lowerCamelCase__ ) - len(lowerCamelCase__ )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | """simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self : Optional[int] ):
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = cva.imread(snake_case__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
lowerCAmelCase__ = np.sum(snake_case__ )
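        # Classic histogram equalization: accumulate the normalized histogram
        # into a cumulative distribution (``self.sk``) and map each grey level
        # to (L - 1) * CDF, rounded to the nearest integer level.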
for i in range(len(snake_case__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase__ = int(last % last )
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    __lowerCAmelCase : Dict = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
__lowerCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 674 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined"
UpperCamelCase_ : Any = "image_segmenter"
UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation
UpperCamelCase_ : List[str] = ["image", "text"]
UpperCamelCase_ : int = ["image"]
def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ):
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ):
with torch.no_grad():
lowerCAmelCase__ = self.model(**snake_case__ ).logits
return logits
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ):
lowerCAmelCase__ = outputs.cpu().detach().numpy()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
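# Usage sketch (``load_tool`` is the public entry point for agent tools and
# resolves the "image_segmenter" task name above; treat the snippet as
# illustrative):
#
#     from transformers import load_tool
#     from PIL import Image
#     tool = load_tool("image_segmenter")
#     mask = tool(Image.open("photo.png"), "a cat")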
| 674 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Dict = TransfoXLTokenizer
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Tuple = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setUp()
lowerCAmelCase__ = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **snake_case__ : Optional[int] ):
lowerCAmelCase__ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Optional[int] ):
lowerCAmelCase__ = """<unk> UNwanted , running"""
lowerCAmelCase__ = """<unk> unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=snake_case__ )
lowerCAmelCase__ = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(snake_case__ , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [0, 4, 8, 7] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = TransfoXLTokenizer(lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = TransfoXLTokenizer(lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = TransfoXLTokenizer(lower_case=snake_case__ )
lowerCAmelCase__ = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
lowerCAmelCase__ = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(snake_case__ ) , snake_case__ )
self.assertEqual(tokenizer.convert_tokens_to_string(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = len(snake_case__ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(snake_case__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 674 | """simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674 | 1 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class a_ :
def __init__( self : Union[str, Any] ):
lowerCAmelCase__ = [2, 1, 2, -1]
lowerCAmelCase__ = [1, 2, 3, 4]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = len(self.first_signal )
lowerCAmelCase__ = len(self.second_signal )
lowerCAmelCase__ = max(snake_case__ , snake_case__ )
# create a zero matrix of max_length x max_length
lowerCAmelCase__ = [[0] * max_length for i in range(snake_case__ )]
        # pad the smaller signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(snake_case__ ):
lowerCAmelCase__ = deque(self.second_signal )
rotated_signal.rotate(snake_case__ )
for j, item in enumerate(snake_case__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowerCAmelCase__ = np.matmul(np.transpose(snake_case__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(snake_case__ , 2 ) for i in final_signal]
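# Worked check (upstream this is CircularConvolution.circular_convolution; with
# the placeholder names above the method is reachable as
# ``a_()._SCREAMING_SNAKE_CASE()``). For the hard-coded signals [2, 1, 2, -1]
# and [1, 2, 3, 4], the circular convolution is [10.0, 10.0, 6.0, 14.0].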
if __name__ == "__main__":
doctest.testmod()
| 674 | """simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Any = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
def __init__( self : List[str] , snake_case__ : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
lowerCAmelCase__ = primes[group]["""prime"""]
lowerCAmelCase__ = primes[group]["""generator"""]
lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
return hex(self.__private_key )[2:]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime )
return hex(snake_case__ )[2:]
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
if not self.is_valid_public_key(snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1
)
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
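# End-to-end sketch. The class body refers to itself as ``DiffieHellman`` and
# upstream the instance methods are ``generate_public_key`` and
# ``generate_shared_key`` (obscured by the placeholder method names above), so
# treat this as illustrative rather than runnable against this exact file:
#
#     alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#     assert alice.generate_shared_key(bob.generate_public_key()) == \
#         bob.generate_shared_key(alice.generate_public_key())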
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = int(lowerCamelCase__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowerCamelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ = divmod(lowerCamelCase__ , 2 )
return binary_recursive(lowerCamelCase__ ) + str(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(lowerCamelCase__ ).strip()
if not number:
raise ValueError("""No input value was provided""" )
lowerCAmelCase__ = """-""" if number.startswith("""-""" ) else """"""
lowerCAmelCase__ = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return f"""{negative}0b{binary_recursive(int(lowerCamelCase__ ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = weights[0][0][0]
lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
lowerCAmelCase__ = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
    # intermediate weights
lowerCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
lowerCAmelCase__ = intermediate_weights[2]
# layernorm 2
lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = torch_model.reformer
# word embeds
lowerCAmelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
if isinstance(weights[3] , lowerCamelCase__ ):
lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
lowerCAmelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# output layer norm
lowerCAmelCase__ = np.asarray(weights[7][0] )
lowerCAmelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# output embeddings
lowerCAmelCase__ = np.asarray(weights[9][0] )
lowerCAmelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as f:
lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = """laion/clap-htsat-unfused"""
lowerCAmelCase__ = tempfile.mkdtemp()
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Optional[int] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **snake_case__ : Dict ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = ClapProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase__ = self.get_feature_extractor(do_normalize=snake_case__ , padding_value=1.0 )
lowerCAmelCase__ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = ClapProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
lowerCAmelCase__ = floats_list((3, 1000) )
lowerCAmelCase__ = feature_extractor(snake_case__ , return_tensors="""np""" )
lowerCAmelCase__ = processor(audios=snake_case__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = ClapProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
lowerCAmelCase__ = """This is a test string"""
lowerCAmelCase__ = processor(text=snake_case__ )
lowerCAmelCase__ = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = ClapProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(snake_case__ )
lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = ClapProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
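        # Note (added): the [2:] slice above drops the two tokenizer-owned entries
        # (input_ids, attention_mask) from the processor's combined input names, so
        # only the feature extractor's input names are compared.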
| 674 | """simple docstring"""
import os
from math import log10
def solution(data_file = "base_exp.txt" ):
    """simple docstring"""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
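# Hedged illustration (added; `_log_trick_agrees` is a hypothetical helper, not part
# of the original solution): comparing x * log10(a) instead of a**x preserves the
# ordering of the powers while avoiding enormous integers.
def _log_trick_agrees(base_a , exp_a , base_b , exp_b ):
    return (exp_a * log10(base_a ) > exp_b * log10(base_b )) == (base_a**exp_a > base_b**exp_b )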
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : str=7 , snake_case__ : str=3 , snake_case__ : Optional[Any]=30 , snake_case__ : Any=400 , snake_case__ : Union[str, Any]=True , snake_case__ : List[str]=None , snake_case__ : List[str]=True , snake_case__ : List[str]=[0.5, 0.5, 0.5] , snake_case__ : Any=[0.5, 0.5, 0.5] , snake_case__ : Dict=True , snake_case__ : Any=1 / 255 , snake_case__ : str=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean
lowerCAmelCase__ = image_std
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_pad
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=False ):
if not batched:
lowerCAmelCase__ = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
lowerCAmelCase__ , lowerCAmelCase__ = image.size
else:
lowerCAmelCase__ , lowerCAmelCase__ = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase__ = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase__ = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase__ = self.size["""shortest_edge"""]
lowerCAmelCase__ = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase__ = self.size["""shortest_edge"""]
lowerCAmelCase__ = self.size["""shortest_edge"""]
else:
lowerCAmelCase__ = []
for image in image_inputs:
lowerCAmelCase__ , lowerCAmelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase__ = max(snake_case__ , key=lambda snake_case__ : item[0] )[0]
lowerCAmelCase__ = max(snake_case__ , key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
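    # Note (added): the branches above mirror aspect-ratio-preserving "shortest edge"
    # resizing - the shorter side becomes size["shortest_edge"] and the longer side is
    # scaled by the same ratio; for batched inputs the per-image maxima are taken so
    # padding can bring every image to a common (height, width).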
@require_torch
@require_vision
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Dict = YolosImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = YolosImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case__ , """image_std""" ) )
self.assertTrue(hasattr(snake_case__ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case__ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case__ , """size""" ) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , snake_case__ )
lowerCAmelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
pass
def _SCREAMING_SNAKE_CASE ( self : int ):
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Initialize image_processings
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
lowerCAmelCase__ = self.image_processing_class(do_resize=snake_case__ , do_normalize=snake_case__ , do_rescale=snake_case__ )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCAmelCase__ = image_processing_a.pad(snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = image_processing_a(snake_case__ , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1E-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# prepare image and target
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowerCAmelCase__ = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , snake_case__ )
lowerCAmelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) )
# verify area
lowerCAmelCase__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , snake_case__ ) )
# verify boxes
lowerCAmelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , snake_case__ )
lowerCAmelCase__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , snake_case__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , snake_case__ ) )
# verify is_crowd
lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , snake_case__ ) )
# verify class_labels
lowerCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , snake_case__ ) )
# verify orig_size
lowerCAmelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , snake_case__ ) )
# verify size
lowerCAmelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , snake_case__ ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
# prepare image, target and masks_path
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase__ = YolosImageProcessor(format="""coco_panoptic""" )
lowerCAmelCase__ = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , snake_case__ )
lowerCAmelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) )
# verify area
lowerCAmelCase__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , snake_case__ ) )
# verify boxes
lowerCAmelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , snake_case__ )
lowerCAmelCase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , snake_case__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , snake_case__ ) )
# verify is_crowd
lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , snake_case__ ) )
# verify class_labels
lowerCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , snake_case__ ) )
# verify masks
lowerCAmelCase__ = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , snake_case__ )
# verify orig_size
lowerCAmelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , snake_case__ ) )
# verify size
lowerCAmelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , snake_case__ ) )
| 674 | """simple docstring"""
def euclidean_gcd(a , b ):
    """simple docstring"""
    while b:
        a , b = b , a % b
    return a
def euclidean_gcd_recursive(a , b ):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
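# Note (added): both helpers implement the Euclidean algorithm - gcd(a, b) equals
# gcd(b, a % b) until the remainder hits zero, e.g. gcd(48, 18) -> gcd(18, 12)
# -> gcd(12, 6) -> gcd(6, 0) = 6.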
def main():
"""simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 674 | 1 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches(image_tensor , patch_height , patch_width ):
    """simple docstring"""
    requires_backends(torch_extract_patches , ["""torch"""] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
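# Hedged shape sketch (added; `_example_patch_shapes` is a hypothetical helper, not
# part of the original module): a [3, 32, 32] image with 16x16 patches unfolds into
# a 2x2 grid of flattened patches.
def _example_patch_shapes():
    example_patches = torch_extract_patches(torch.randn(3 , 32 , 32 ) , 16 , 16 )
    assert example_patches.shape == (1, 2, 2, 16 * 16 * 3)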
def render_text(text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ):
    """simple docstring"""
    requires_backends(render_text , """vision""" )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = """\n""".join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , """Arial.TTF""" )
    font = ImageFont.truetype(font , encoding="""UTF-8""" , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , background_color ) )
    _ , _ , text_width , text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("""RGB""" , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
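# Note (added): the function above measures the wrapped text on a 1x1 scratch canvas
# via textbbox, then allocates a padded canvas of exactly the measured size and draws
# the text at the padding offset.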
def render_header(image , header , **kwargs ):
    """simple docstring"""
    requires_backends(render_header , """vision""" )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Dict = ["flattened_patches"]
def __init__( self : Tuple , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : int = 2048 , snake_case__ : bool = False , **snake_case__ : Optional[Any] , ):
super().__init__(**snake_case__ )
lowerCAmelCase__ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = do_convert_rgb
lowerCAmelCase__ = max_patches
lowerCAmelCase__ = is_vqa
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : dict , **snake_case__ : Any ):
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
lowerCAmelCase__ = to_channel_dimension_format(snake_case__ , ChannelDimension.FIRST )
lowerCAmelCase__ = torch.from_numpy(snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = patch_size["""height"""], patch_size["""width"""]
lowerCAmelCase__ , lowerCAmelCase__ = get_image_size(snake_case__ )
# maximize scale s.t.
lowerCAmelCase__ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowerCAmelCase__ = max(min(math.floor(scale * image_height / patch_height ) , snake_case__ ) , 1 )
lowerCAmelCase__ = max(min(math.floor(scale * image_width / patch_width ) , snake_case__ ) , 1 )
lowerCAmelCase__ = max(num_feasible_rows * patch_height , 1 )
lowerCAmelCase__ = max(num_feasible_cols * patch_width , 1 )
lowerCAmelCase__ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=snake_case__ , antialias=snake_case__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowerCAmelCase__ = torch_extract_patches(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = patches.shape
lowerCAmelCase__ = patches_shape[1]
lowerCAmelCase__ = patches_shape[2]
lowerCAmelCase__ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowerCAmelCase__ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowerCAmelCase__ = torch.arange(snake_case__ ).reshape([rows, 1] ).repeat(1 , snake_case__ ).reshape([rows * columns, 1] )
lowerCAmelCase__ = torch.arange(snake_case__ ).reshape([1, columns] ).repeat(snake_case__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowerCAmelCase__ = row_ids.to(torch.floataa )
lowerCAmelCase__ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase__ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase__ = torch.nn.functional.pad(snake_case__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowerCAmelCase__ = to_numpy_array(snake_case__ )
return result
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : np.ndarray , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[int] ):
if image.dtype == np.uinta:
lowerCAmelCase__ = image.astype(np.floataa )
# take mean across the whole `image`
lowerCAmelCase__ = np.mean(snake_case__ )
lowerCAmelCase__ = np.std(snake_case__ )
lowerCAmelCase__ = max(snake_case__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , **snake_case__ )
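    # Note (added): the normalization above mirrors TensorFlow-style per-image
    # standardization - the std is clamped below by 1/sqrt(N) for an image with N
    # elements so that nearly constant images do not explode after normalization.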
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : ImageInput , snake_case__ : Optional[str] = None , snake_case__ : bool = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Dict[str, int]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : Optional[int] , ):
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ = patch_size if patch_size is not None else self.patch_size
lowerCAmelCase__ = max_patches if max_patches is not None else self.max_patches
lowerCAmelCase__ = self.is_vqa
if kwargs.get("""data_format""" , snake_case__ ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
lowerCAmelCase__ = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(snake_case__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
lowerCAmelCase__ = kwargs.pop("""font_bytes""" , snake_case__ )
lowerCAmelCase__ = kwargs.pop("""font_path""" , snake_case__ )
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ = [header_text] * len(snake_case__ )
lowerCAmelCase__ = [
render_header(snake_case__ , header_text[i] , font_bytes=snake_case__ , font_path=snake_case__ )
for i, image in enumerate(snake_case__ )
]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=snake_case__ ) for image in images]
# convert to torch tensor and permute
lowerCAmelCase__ = [
self.extract_flattened_patches(image=snake_case__ , max_patches=snake_case__ , patch_size=snake_case__ )
for image in images
]
# create attention mask in numpy
lowerCAmelCase__ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowerCAmelCase__ = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=snake_case__ )
return encoded_outputs
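# Hedged usage sketch (added; the class name and sizes are assumptions based on the
# upstream Pix2Struct processor, not confirmed by this file):
#
#     processor = Pix2StructImageProcessor()
#     batch = processor(images=[pil_image], return_tensors="np")
#     # batch["flattened_patches"]: [1, max_patches, 2 + 16 * 16 * 3]
#     # batch["attention_mask"]:    [1, max_patches], 1 for real patches, 0 for padding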
| 674 | """simple docstring"""
import os
def solution():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_one = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_two = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_one , number_two )
    return max(a[-1] )
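# Note (added): this is a bottom-up dynamic programme over the triangle - each entry
# accumulates the larger of its two parents from the row above, so the last row ends
# up holding complete best-path sums and the answer is its maximum.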
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skip("""Test was skipped""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
if test_case is None:
return partial(lowerCamelCase__ , version=lowerCamelCase__ )
return unittest.skipUnless(is_torch_version(""">=""" , lowerCamelCase__ ) , f"""test requires torch version >= {version}""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(lowerCamelCase__ )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(lowerCamelCase__ )
class a_ ( unittest.TestCase ):
UpperCamelCase_ : Union[str, Any] = True
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ):
lowerCAmelCase__ = tempfile.mkdtemp()
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _SCREAMING_SNAKE_CASE ( self : str ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(snake_case__ )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[mock.Mock, List[mock.Mock]] ):
lowerCAmelCase__ = mocks if isinstance(snake_case__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor ):
    """simple docstring"""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
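# Note (added): the helper above gathers a copy of the tensor from every process and
# compares each gathered slice against the local value, i.e. it verifies the tensor
# is identical across all distributed workers.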
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """simple docstring"""
    if echo:
        print("""\nRunning: """ , """ """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label="""stdout:""" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label="""stderr:""" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class SubprocessCallException(Exception ):
    pass
def run_command(command , return_stdout=False ):
    """simple docstring"""
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , """decode""" ):
                output = output.decode("""utf-8""" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"""Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ):
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ):
lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowerCAmelCase__ = F"""facebook/wmt19-{pair}"""
lowerCAmelCase__ = self.get_tokenizer(snake_case__ )
lowerCAmelCase__ = self.get_model(snake_case__ )
lowerCAmelCase__ = bleu_data[pair]["""src"""]
lowerCAmelCase__ = bleu_data[pair]["""tgt"""]
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ )
lowerCAmelCase__ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCAmelCase__ = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__lowerCAmelCase : Optional[Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__lowerCAmelCase : Optional[Any] = "main"
# Default branch name
__lowerCAmelCase : Tuple = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__lowerCAmelCase : str = "aaaaaaa"
# This commit does not exist, so we should 404.
__lowerCAmelCase : Dict = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__lowerCAmelCase : Union[str, Any] = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def _UpperCAmelCase ( ):
"""simple docstring"""
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def _UpperCAmelCase ( ):
"""simple docstring"""
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class a_ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Optional[Any] ):
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : List[str] ):
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : List[str] ):
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict ):
self.assertEqual(find_labels(snake_case__ ) , ["""labels"""] )
self.assertEqual(find_labels(snake_case__ ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(snake_case__ ) , ["""start_positions""", """end_positions"""] )
class a_ ( __UpperCamelCase ):
pass
self.assertEqual(find_labels(snake_case__ ) , ["""labels"""] )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.assertEqual(find_labels(snake_case__ ) , ["""labels"""] )
self.assertEqual(find_labels(snake_case__ ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(snake_case__ ) , ["""start_positions""", """end_positions"""] )
class a_ ( __UpperCamelCase ):
pass
self.assertEqual(find_labels(snake_case__ ) , ["""labels"""] )
@require_flax
def _SCREAMING_SNAKE_CASE ( self : Any ):
# Flax models don't have labels
self.assertEqual(find_labels(snake_case__ ) , [] )
self.assertEqual(find_labels(snake_case__ ) , [] )
self.assertEqual(find_labels(snake_case__ ) , [] )
class a_ ( __UpperCamelCase ):
pass
self.assertEqual(find_labels(snake_case__ ) , [] )
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 674 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( __UpperCamelCase ):
def __init__( self : Optional[Any] , snake_case__ : UNetaDModel , snake_case__ : UNetaDModel , snake_case__ : DDPMScheduler , snake_case__ : Optional[Any] , ):
super().__init__()
lowerCAmelCase__ = value_function
lowerCAmelCase__ = unet
lowerCAmelCase__ = scheduler
lowerCAmelCase__ = env
lowerCAmelCase__ = env.get_dataset()
lowerCAmelCase__ = {}
for key in self.data.keys():
try:
lowerCAmelCase__ = self.data[key].mean()
except: # noqa: E722
pass
lowerCAmelCase__ = {}
for key in self.data.keys():
try:
lowerCAmelCase__ = self.data[key].std()
except: # noqa: E722
pass
lowerCAmelCase__ = env.observation_space.shape[0]
lowerCAmelCase__ = env.action_space.shape[0]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple ):
return (x_in - self.means[key]) / self.stds[key]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Optional[int] ):
return x_in * self.stds[key] + self.means[key]
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
if type(snake_case__ ) is dict:
return {k: self.to_torch(snake_case__ ) for k, v in x_in.items()}
elif torch.is_tensor(snake_case__ ):
return x_in.to(self.unet.device )
return torch.tensor(snake_case__ , device=self.unet.device )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Any ):
for key, val in cond.items():
lowerCAmelCase__ = val.clone()
return x_in
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : str , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
lowerCAmelCase__ = x.shape[0]
lowerCAmelCase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCAmelCase__ = torch.full((batch_size,) , snake_case__ , device=self.unet.device , dtype=torch.long )
for _ in range(snake_case__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCAmelCase__ = self.value_function(x.permute(0 , 2 , 1 ) , snake_case__ ).sample
lowerCAmelCase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowerCAmelCase__ = self.scheduler._get_variance(snake_case__ )
lowerCAmelCase__ = torch.exp(0.5 * posterior_variance )
lowerCAmelCase__ = model_std * grad
lowerCAmelCase__ = 0
lowerCAmelCase__ = x.detach()
lowerCAmelCase__ = x + scale * grad
lowerCAmelCase__ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
lowerCAmelCase__ = self.unet(x.permute(0 , 2 , 1 ) , snake_case__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowerCAmelCase__ = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , predict_epsilon=snake_case__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
lowerCAmelCase__ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
lowerCAmelCase__ = self.to_torch(snake_case__ )
return x, y
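    # Note (added): the loop above is value-function guidance - at each denoising step
    # the trajectory is nudged by `scale * grad`, where `grad` is the gradient of the
    # learned value estimate w.r.t. the noisy trajectory, and the conditioning (the
    # current state) is re-imposed after every update.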
def __call__( self : int , snake_case__ : Optional[int] , snake_case__ : int=64 , snake_case__ : Dict=32 , snake_case__ : Any=2 , snake_case__ : Optional[Any]=0.1 ):
# normalize the observations and create batch dimension
lowerCAmelCase__ = self.normalize(snake_case__ , """observations""" )
lowerCAmelCase__ = obs[None].repeat(snake_case__ , axis=0 )
lowerCAmelCase__ = {0: self.to_torch(snake_case__ )}
lowerCAmelCase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCAmelCase__ = randn_tensor(snake_case__ , device=self.unet.device )
lowerCAmelCase__ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
lowerCAmelCase__ = self.to_torch(snake_case__ )
# run the diffusion process
lowerCAmelCase__ , lowerCAmelCase__ = self.run_diffusion(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# sort output trajectories by value
lowerCAmelCase__ = y.argsort(0 , descending=snake_case__ ).squeeze()
lowerCAmelCase__ = x[sorted_idx]
lowerCAmelCase__ = sorted_values[:, :, : self.action_dim]
lowerCAmelCase__ = actions.detach().cpu().numpy()
lowerCAmelCase__ = self.de_normalize(snake_case__ , key="""actions""" )
# select the action with the highest value
if y is not None:
lowerCAmelCase__ = 0
else:
# if we didn't run value guiding, select a random action
lowerCAmelCase__ = np.random.randint(0 , snake_case__ )
lowerCAmelCase__ = denorm_actions[selected_index, 0]
return denorm_actions
| 674 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
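# A minimal sketch (added for illustration; not part of the test file) of the
# registration pattern these tests exercise: pairing a custom config class with
# a custom image processor lets AutoImageProcessor resolve both. Names below are
# hypothetical; shown commented out so the test module stays import-safe.
# from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
# from transformers.image_processing_utils import BaseImageProcessor
#
# class MyConfig(PretrainedConfig):
#     model_type = "my-model"
#
# class MyImageProcessor(BaseImageProcessor):
#     pass
#
# AutoConfig.register("my-model", MyConfig)
# AutoImageProcessor.register(MyConfig, MyImageProcessor)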
| 674 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
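# How the lazy module works (an illustrative sketch added here, not
# transformers' exact implementation): attribute access triggers the real
# submodule import, so importing this package stays cheap until a symbol is used.
# import importlib
# from types import ModuleType
#
# class LazyModuleSketch(ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#         value = getattr(module, attr)
#         setattr(self, attr, value)  # cache so later lookups bypass __getattr__
#         return value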
| 674 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
# NOTE: class and parameter names in the helpers below were restored to match
# accelerate's test utilities; the original dump had obfuscated them.
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):
    """Builds MRPC train/eval dataloaders for the tests that use these helpers."""
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
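# Illustrative wiring of the helpers above with Accelerate (an addition, shown
# commented out because the MRPC CSVs must exist for get_dataloaders to run):
# from accelerate import Accelerator
# accelerator = Accelerator()
# model = RegressionModel(a=2, b=3)
# train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
# model, train_dl, eval_dl = accelerator.prepare(model, train_dl, eval_dl)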
| 674 | 1 |
"""simple docstring"""
def solution(n=100):
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares, using the closed forms."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    print(f"{solution() = }")
| 674 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Converts a MobileBERT TF checkpoint to a PyTorch model."""
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__lowerCAmelCase : Tuple = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class a_ ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ):
lowerCAmelCase__ = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(snake_case__ )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ , repo_id="""test-model-flax""" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(snake_case__ )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
snake_case__ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
def check_models_equal(model_1, model_2):
    """Returns True if two Flax models have (numerically) identical parameters."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
lowerCAmelCase__ = FlaxBertModel(snake_case__ )
lowerCAmelCase__ = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) )
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
lowerCAmelCase__ = FlaxBertModel(snake_case__ )
lowerCAmelCase__ = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) , max_shard_size="""10KB""" )
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = """bert"""
lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """bert"""
lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertIsNotNone(snake_case__ )
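# Small illustration (added) of the flatten_dict pattern used throughout these
# tests: a nested Flax parameter tree becomes a {tuple_path: leaf} mapping.
# from flax.traverse_util import flatten_dict
# params = {"dense": {"kernel": [[1.0]], "bias": [0.0]}}
# flatten_dict(params)  # {("dense", "kernel"): [[1.0]], ("dense", "bias"): [0.0]}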
| 674 | """simple docstring"""
def sylvester(number):
    """Returns the number-th term of Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage, current, power):
    """Given any two of voltage, current and power (pass the unknown as 0),
    returns the missing quantity as a named tuple."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
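# Added usage illustration: pass the unknown quantity as 0.
# >>> electric_power(voltage=0, current=2, power=5)
# result(name='voltage', value=2.5)
# >>> electric_power(voltage=2.2, current=2.2, power=0)
# result(name='power', value=4.84)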
| 674 | """simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = PegasusTokenizer
UpperCamelCase_ : Any = PegasusTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(snake_case__ ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = PegasusTokenizer
UpperCamelCase_ : Optional[int] = PegasusTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 674 | 1 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Converts a BigBird TF checkpoint to a PyTorch model."""
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 674 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , )
assert hasattr(self , """env""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
# NOTE: names in this module were restored to match datasets.utils.logging.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    """Returns the level set via the DATASETS_VERBOSITY env var, else the default."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def _get_library_name():
    return __name__.split(".")[0]
def _get_library_root_logger():
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger():
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger():
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name=None):
    """Returns a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity():
    """Returns the current verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity):
    """Sets the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_propagation():
    """Disables propagation of library log outputs."""
    _get_library_root_logger().propagate = False
def enable_propagation():
    """Enables propagation of library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
def __enter__( self : List[Any] ):
return self
def __exit__( self : int , snake_case__ : str , snake_case__ : List[str] , snake_case__ : str ):
return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
def _SCREAMING_SNAKE_CASE ( self : Any ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    """Returns True if tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    """Enables tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    """Disables tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
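# Illustrative usage of the API above (mirrors datasets.utils.logging; added,
# not part of the original module):
# from datasets.utils import logging as ds_logging
# ds_logging.set_verbosity_error()   # silence info/warning output
# ds_logging.disable_progress_bar()  # hide tqdm bars globally
# logger = ds_logging.get_logger(__name__)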
| 674 | """simple docstring"""
from math import pi, sqrt
def gamma(num):
    """Evaluates the Gamma function for positive integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma():
    """Basic sanity checks for gamma()."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    print("\nEnter 0 to exit...")
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:
            print(f"gamma({num}) = {gamma(num)}")
| 674 | 1 |
"""simple docstring"""
def heaps(arr):
    """Returns all permutations of arr using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
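# Optional cross-check (added): Heap's algorithm produces exactly the
# permutations of itertools.permutations, only in a different order.
from itertools import permutations
assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))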
| 674 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 2
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
lowerCAmelCase__ = TFDeiTModel(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ):
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ):
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Any = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TFDeiTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ):
lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 674 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
# NOTE: class, attribute, and parameter names below were restored to match the
# reference transformers implementation; the dump had obfuscated them.
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Gets a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 674 | """simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Returns a nontrivial factor of ``num``, or None if none is found within ``attempts`` retries."""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for keychar, cipherchar in zip(cycle(lowerCamelCase__ ) , lowerCamelCase__ ):
lowerCAmelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCamelCase__ )
return decoded
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = []
for key in product(lowerCamelCase__ , repeat=3 ):
lowerCAmelCase__ = try_key(lowerCamelCase__ , lowerCamelCase__ )
if encoded is not None:
possibles.append(lowerCamelCase__ )
return possibles
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def _UpperCAmelCase ( lowerCamelCase__ = "p059_cipher.txt" ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = Path(lowerCamelCase__ ).parent.joinpath(lowerCamelCase__ ).read_text(encoding="""utf-8""" )
lowerCAmelCase__ = [int(lowerCamelCase__ ) for number in data.strip().split(""",""" )]
lowerCAmelCase__ = filter_valid_chars(lowerCamelCase__ )
for common_word in COMMON_WORDS:
lowerCAmelCase__ = filter_common_word(lowerCamelCase__ , lowerCamelCase__ )
if len(lowerCamelCase__ ) == 1:
break
lowerCAmelCase__ = possibles[0]
return sum(ord(lowerCamelCase__ ) for char in decoded_text )
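# Why a single XOR pass decrypts (illustrative sketch, not from the original
# solution): XOR with the same key byte is self-inverse, so applying the key
# a second time recovers the plaintext.
# >>> key, char = ord("a"), ord("T")
# >>> chr((char ^ key) ^ key)
# 'T'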
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ )
# set absolute/relative position embeddings parameter
lowerCAmelCase__ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = True
# hparam_utils.py hparams
lowerCAmelCase__ = 0.66_46_94
lowerCAmelCase__ = 0.20_79_51
lowerCAmelCase__ = 0.12_11_94
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 0.0_35_25_13
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = False
# hparam_utils.py hparams
lowerCAmelCase__ = 36.45_19
lowerCAmelCase__ = 0.90_34_21
lowerCAmelCase__ = 2_22.0_88
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 0.76_31_41
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "TABFACT":
lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ )
elif task == "MLM":
lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(lowerCamelCase__ )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
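# Example invocation (script name and paths are hypothetical; note that the
# checkpoint path must end in ``model.ckpt``, because the tokenizer vocab is
# located by stripping those 10 characters and appending ``vocab.txt``):
# python convert_tapas_checkpoint.py \
# --task WTQ \
# --reset_position_index_per_cell \
# --tf_checkpoint_path /path/to/model.ckpt \
# --tapas_config_file /path/to/tapas_config.json \
# --pytorch_dump_path /path/to/output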
| 674 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase : Optional[int] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__lowerCAmelCase : Optional[Any] = {"allegro/herbert-base-cased": 5_14}
__lowerCAmelCase : List[str] = {}
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[str] = HerbertTokenizer
def __init__( self : Union[str, Any] , snake_case__ : List[str]=None , snake_case__ : List[Any]=None , snake_case__ : Any=None , snake_case__ : Dict="<s>" , snake_case__ : Optional[Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : int , ):
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
lowerCAmelCase__ = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
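# Illustrative token layout implied by the methods above (an assumption, not
# part of the original file): a single sequence is encoded as ``<s> A </s>``
# and a pair as ``<s> A </s> B </s>``, with token_type_ids of 0 for the first
# segment (including ``<s>`` and the first ``</s>``) and 1 for the second.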
| 674 | """simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ = 50 ):
"""simple docstring"""
lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
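# Worked check (not in the original file): for a row of length 5 there are
# 7 tilings with length-2 tiles, 3 with length-3 tiles and 2 with length-4
# tiles, so ``solution(5)`` returns 7 + 3 + 2 == 12 -- the example figures
# quoted in Project Euler problem 116.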
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 2
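# Worked example of the sequence length above (illustrative): with the
# defaults image_size=30 and patch_size=2, there are (30 // 2) ** 2 == 225
# patches, so the sequence length is 225 + 2 == 227.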
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
lowerCAmelCase__ = TFDeiTModel(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ):
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ):
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Any = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TFDeiTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ):
lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 674 | """simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowerCAmelCase__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase__ = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
lowerCAmelCase__ = nlp.model.BERTModel(
lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , )
original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
lowerCAmelCase__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase__ = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ )
lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = hf_param.shape
lowerCAmelCase__ = to_torch(params[gluon_param] )
lowerCAmelCase__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by the RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase__ = layer.attention.self
lowerCAmelCase__ = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowerCAmelCase__ = layer.attention.output
lowerCAmelCase__ = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowerCAmelCase__ = layer.intermediate
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowerCAmelCase__ = layer.output
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""]
# Get gluon output
lowerCAmelCase__ = mx.nd.array([input_ids] )
lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase__ )
lowerCAmelCase__ = BertModel.from_pretrained(lowerCamelCase__ )
hf_bort_model.eval()
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0]
lowerCAmelCase__ = output_gluon[0].asnumpy()
lowerCAmelCase__ = output_hf[0].detach().numpy()
lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : str = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
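# Example invocation (script name and paths are hypothetical):
# python convert_bort_checkpoint.py \
# --bort_checkpoint_path /path/to/bort.params \
# --pytorch_dump_folder_path /path/to/output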
| 674 | 1 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = f"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCamelCase__ )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(lowerCamelCase__ ) ) % 2 else 1
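# Worked example (illustrative, not in the original file): for n = 12,
# prime_factors(12) == [2, 2, 3] has an odd length, so the function returns
# -1, i.e. the Liouville function lambda(12) = (-1) ** 3 = -1.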
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | """simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self : Optional[int] ):
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = cva.imread(snake_case__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
lowerCAmelCase__ = np.sum(snake_case__ )
for i in range(len(snake_case__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase__ = int(last % last )
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
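# The stretch method above implements the standard histogram-equalization
# transform (sketch of the math, stated as an assumption about the intent):
# s_k = (L - 1) * sum_{j<=k} p(r_j), where p(r_j) is the normalized histogram
# count for intensity r_j and L = 256 grey levels.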
if __name__ == "__main__":
__lowerCAmelCase : Dict = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
__lowerCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 674 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ ( __UpperCamelCase ):
def __init__( self : List[str] , snake_case__ : List[Any] , snake_case__ : Tuple=13 , snake_case__ : str=7 , snake_case__ : int=True , snake_case__ : Any=True , snake_case__ : Optional[int]=True , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=99 , snake_case__ : List[Any]=32 , snake_case__ : List[Any]=5 , snake_case__ : Union[str, Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Any="gelu" , snake_case__ : Any=0.1 , snake_case__ : int=0.1 , snake_case__ : Any=512 , snake_case__ : str=16 , snake_case__ : Optional[Any]=2 , snake_case__ : List[str]=0.02 , snake_case__ : List[Any]=False , snake_case__ : Dict=True , snake_case__ : List[Any]="None" , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : List[Any]=None , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = relative_attention
lowerCAmelCase__ = position_biased_input
lowerCAmelCase__ = pos_att_type
lowerCAmelCase__ = scope
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = self.get_config()
lowerCAmelCase__ = 300
return config
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Dict ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = DebertaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )[0]
lowerCAmelCase__ = model(snake_case__ , token_type_ids=snake_case__ )[0]
lowerCAmelCase__ = model(snake_case__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
lowerCAmelCase__ = DebertaForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DebertaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Optional[int] ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DebertaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = DebertaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Optional[int] = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : int = True
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Dict = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = DebertaModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = DebertaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
lowerCAmelCase__ = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowerCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ )[0]
# compare the actual values for a slice.
lowerCAmelCase__ = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 674 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined"
UpperCamelCase_ : Any = "image_segmenter"
UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation
UpperCamelCase_ : List[str] = ["image", "text"]
UpperCamelCase_ : int = ["image"]
def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ):
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ):
with torch.no_grad():
lowerCAmelCase__ = self.model(**snake_case__ ).logits
return logits
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ):
lowerCAmelCase__ = outputs.cpu().detach().numpy()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
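# Sketch of the decode step above (the anonymized assignments presumably
# binarize the logits): each pixel is mapped to 0 or 1, scaled by 255 and
# returned as an 8-bit black-and-white PIL mask of the queried label.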
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCAmelCase : Union[str, Any] = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCAmelCase : int = BASE_URL + "/user"
# https://github.com/settings/tokens
__lowerCAmelCase : Tuple = os.environ.get("USER_TOKEN", "")
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {
"""Authorization""": f"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowerCamelCase__ , headers=lowerCamelCase__ ).json()
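# Example usage (the token below is hypothetical; a real personal access
# token from https://github.com/settings/tokens is required):
# >>> fetch_github_info("ghp_XXXXXXXXXXXXXXXX")["login"]
# 'your-username'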
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 674 | """simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined"
UpperCamelCase_ : Any = "image_segmenter"
UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation
UpperCamelCase_ : List[str] = ["image", "text"]
UpperCamelCase_ : int = ["image"]
def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ):
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ):
with torch.no_grad():
lowerCAmelCase__ = self.model(**snake_case__ ).logits
return logits
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ):
lowerCAmelCase__ = outputs.cpu().detach().numpy()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 674 | """simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Any = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
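# The prime values above appear to be the "More Modular Exponential (MODP)" Diffie-Hellman
# groups of RFC 3526 (group 14 = 2048-bit up through group 18 = 8192-bit); the RFC
# attribution is an assumption, since this file does not state a source.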
class DiffieHellman:
    def __init__( self : List[str] , group : int = 14 ):
        if group not in primes:
            raise ValueError("""Unsupported Group""" )
        self.prime = primes[group]["""prime"""]
        self.generator = primes[group]["""generator"""]
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self : Any ):
        return hex(self.__private_key )[2:]
    def generate_public_key( self : Union[str, Any] ):
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self : List[str] , key : int ):
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self : Optional[Any] , other_key_str : str ):
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key : int , prime : int ):
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ):
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["""prime"""]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
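# Minimal sketch of the raw handshake math the class above wraps. It assumes it runs
# in this module (so `primes`, `hexlify` and `urandom` are in scope); the alice_*/bob_*
# names are purely illustrative.
def _demo_handshake():
    prime = primes[14]["""prime"""]
    generator = primes[14]["""generator"""]
    alice_secret = int(hexlify(urandom(32 ) ) , base=16 )
    bob_secret = int(hexlify(urandom(32 ) ) , base=16 )
    alice_public = pow(generator , alice_secret , prime )
    bob_public = pow(generator , bob_secret , prime )
    # Both sides compute g**(a*b) mod p, so they agree on the shared value.
    assert pow(bob_public , alice_secret , prime ) == pow(alice_public , bob_secret , prime )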
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=3_84,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=1_28,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer ( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    """simple docstring"""
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
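# Note (an assumption about PyCUDA/CUDA semantics, not stated in this script): the
# *_async memcpy calls above only truly overlap with compute when the host buffers
# are page-locked, which is why the output buffers are allocated further below with
# cuda.pagelocked_empty rather than plain numpy arrays.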
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
__lowerCAmelCase : int = 0  # placeholder removed; see below
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features ( examples ):
    """simple docstring"""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["""example_id"""] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["""example_id"""].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
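# Back-of-the-envelope illustration (hypothetical numbers, and simplified in that it
# ignores the repeated question and special tokens): with max_seq_length=384 and
# doc_stride=128, consecutive context windows advance by 384 - 128 = 256 tokens, so a
# 600-token context yields overlapping features and every answer span lies fully
# inside at least one of them.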
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
)
def post_processing_function ( examples , features , predictions , stage="eval" ):
    """simple docstring"""
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes ( binding ):
        """simple docstring"""
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits , end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 10_00 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 10_00))
logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param ( torch_layer , weight , bias=None ):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh ( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
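# A note on the reshuffling above (my reading of the layout, stated as an assumption):
# trax appears to store attention projections as per-head blocks, while the PyTorch
# modules expect flat (d_model, d_model) matrices, hence the transpose(1, 2) followed
# by view(-1, hidden_size) before handing the tensor to set_param.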
def set_layer_weights_in_torch_local ( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch ( weights , torch_block , hidden_size ):
    """simple docstring"""
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0] )
    layer_norm_a_bias = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_b_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_b_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_b_weight ) , torch.tensor(layer_norm_b_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch ( weights , torch_model , hidden_size ):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch ( trax_model_pkl_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , """rb""" ) as f:
        model_weights = pickle.load(f )["""weights"""]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
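# Example invocation (the script name and all paths are placeholders):
# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path /path/to/model.pkl \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin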
| 674 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = MgpstrTokenizer
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Dict = {}
UpperCamelCase_ : int = False
    def setUp( self : int ):
        super().setUp()
# fmt: off
lowerCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase__ = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case__ ) + """\n""" )
    def get_tokenizer( self : Tuple , **kwargs : Dict ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : List[str] , tokenizer : Any ):
        input_text = """tester"""
        output_text = """tester"""
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
    def test_add_special_tokens( self : int ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                special_token = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency( self : List[Any] ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(""" """ , """""" ) , output_text )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
| 674 | """simple docstring"""
import os
from math import log10
def solution ( data_file = "base_exp.txt" ):
    """simple docstring"""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
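# Why logs work here: a**x is astronomically large, but log10(a**x) = x * log10(a),
# so comparing x * log10(a) ranks the pairs without ever computing a**x.
# e.g. 2**1000 vs 3**600 -> 1000*log10(2) ~ 301.0 vs 600*log10(3) ~ 286.3.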
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
    def setUp( self : Tuple ):
        super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer( self : int , **kwargs : Union[str, Any] ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : List[Any] , tokenizer : Tuple ):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
    def test_full_tokenizer( self : Optional[Any] ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674 | """simple docstring"""
def euclidean_gcd ( a , b ):
    """simple docstring"""
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive ( a , b ):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
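# Worked trace (illustration): euclidean_gcd(48, 18)
#   (a, b) = (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> returns 6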
def main ( ):
"""simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 674 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class a_ ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self : Dict , snake_case__ : int = 768 , ):
super().__init__()
lowerCAmelCase__ = nn.Parameter(torch.zeros(1 , snake_case__ ) )
lowerCAmelCase__ = nn.Parameter(torch.ones(1 , snake_case__ ) )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] , torch_device : Optional[Union[str, torch.device]] = None , torch_dtype : Optional[torch.dtype] = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Any ):
lowerCAmelCase__ = (embeds - self.mean) * 1.0 / self.std
return embeds
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Dict ):
lowerCAmelCase__ = (embeds * self.std) + self.mean
return embeds
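    # Round-trip property of the scale/unscale pair above: unscale(scale(x)) == x,
    # since ((x - mean) / std) * std + mean recovers x whenever std is nonzero.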
| 674 | """simple docstring"""
import os
def solution ( ):
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
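# Worked example of the bottom-up pass (tiny triangle):
#      3              3
#     7 4    ->     10 7         best path 3 -> 7 -> 4 = 14
#    2 4 6         12 14 13      answer = max(last row) = 14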
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( number_of_steps ):
    """simple docstring"""
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    current , previous = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
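# Values produced (ways to climb n steps taking 1 or 2 at a time):
# n = 1 -> 1, n = 2 -> 2, n = 3 -> 3, n = 4 -> 5, n = 5 -> 8  (a shifted Fibonacci sequence)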
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
    def get_tokenizer( self : Optional[Any] , mname : Dict ):
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self : List[str] , mname : Any ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores( self : Dict , pair : Any , min_bleu_score : int ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = F"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
| 674 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a_ :
    def _get_dummy_components( self : List[str] ):
torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components( self : str ):
torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
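    # Hypothetical wiring (not part of this file): a concrete test case mixes these
    # factories in and points them at a real pipeline, roughly
    #
    #     class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
    #         pipeline_class = IFPipeline
    #         def get_dummy_components(self):
    #             return self._get_dummy_components()
    #
    # the mixin and class names in this sketch are assumptions for illustration.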
    def _test_save_load_optional_components( self : List[Any] ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["""prompt"""]
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        if "image" in inputs:
            image = inputs["""image"""]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["""mask_image"""]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["""original_image"""]
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
    def _test_save_load_local( self : Optional[Any] ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 674 | 1 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
random.seed(lowerCamelCase__ )
np.random.seed(lowerCamelCase__ )
torch.manual_seed(lowerCamelCase__ )
torch.cuda.manual_seed_all(lowerCamelCase__ )
# ^^ safe to call this function even if cuda is not available
class a_ :
    def __init__( self : Any , parameters : Iterable[torch.nn.Parameter] , decay : float = 0.9999 , min_decay : float = 0.0 , update_after_step : int = 0 , use_ema_warmup : bool = False , inv_gamma : Union[float, int] = 1.0 , power : Union[float, int] = 2 / 3 , model_cls : Optional[Any] = None , model_config : Dict[str, Any] = None , **kwargs : List[Any] , ):
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
                """Please pass the parameters of the module instead."""
            )
            deprecate(
                """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("""max_value""" , None ) is not None:
            deprecation_message = """The `max_value` argument is deprecated. Please use `decay` instead."""
            deprecate("""max_value""" , """1.0.0""" , deprecation_message , standard_warn=False )
            decay = kwargs["""max_value"""]
        if kwargs.get("""min_value""" , None ) is not None:
            deprecation_message = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
            deprecate("""min_value""" , """1.0.0""" , deprecation_message , standard_warn=False )
            min_decay = kwargs["""min_value"""]
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("""device""" , None ) is not None:
            deprecation_message = """The `device` argument is deprecated. Please use `to` instead."""
            deprecate("""device""" , """1.0.0""" , deprecation_message , standard_warn=False )
            self.to(device=kwargs["""device"""] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained( cls : int , path : Tuple , model_cls : Optional[Any] ):
        _ , ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained( self : Optional[int] , path : str ):
        if self.model_cls is None:
            raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
        if self.model_config is None:
            raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop("""shadow_params""" , None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay( self : Tuple , optimization_step : int ):
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
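    # Illustration of the warmup schedule with inv_gamma=1.0 and power=2/3 (the defaults):
    #   step 10   -> 1 - 11**(-2/3)   ~ 0.798
    #   step 1000 -> 1 - 1001**(-2/3) ~ 0.990
    # i.e. the EMA tracks the raw weights quickly early in training and stabilizes later.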
@torch.no_grad()
    def step( self : str , parameters : Iterable[torch.nn.Parameter] ):
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
                """Please pass the parameters of the module instead."""
            )
            deprecate(
                """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to( self : Optional[Any] , parameters : Iterable[torch.nn.Parameter] ):
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params , parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to( self : Union[str, Any] , device : str = None , dtype : List[Any] = None ):
        self.shadow_params = [
            p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict( self : Tuple ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store( self : Tuple , parameters : Iterable[torch.nn.Parameter] ):
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore( self : Union[str, Any] , parameters : Iterable[torch.nn.Parameter] ):
        if self.temp_stored_params is None:
            raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict( self : List[Any] , state_dict : dict ):
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("""decay""" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("""Decay must be between 0 and 1""" )
        self.min_decay = state_dict.get("""min_decay""" , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError("""Invalid min_decay""" )
        self.optimization_step = state_dict.get("""optimization_step""" , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError("""Invalid optimization_step""" )
        self.update_after_step = state_dict.get("""update_after_step""" , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError("""Invalid update_after_step""" )
        self.use_ema_warmup = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError("""Invalid use_ema_warmup""" )
        self.inv_gamma = state_dict.get("""inv_gamma""" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("""Invalid inv_gamma""" )
        self.power = state_dict.get("""power""" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("""Invalid power""" )
        shadow_params = state_dict.get("""shadow_params""" , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError("""shadow_params must be a list""" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("""shadow_params must all be Tensors""" )
| 674 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
    def setUp( self : Optional[int] ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(config_tmpfile , """w""" ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname ) / """config.json"""
            json.dump(
                {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(config_tmpfile , """w""" ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(config_tmpfile , """w""" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop("""image_processor_type""" )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
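# A hedged, standalone sketch of the registration pattern the two tests above
# exercise: teach the auto classes a custom config/image-processor pair, round-trip
# it through save/load, then restore the registries. `CustomConfig` and
# `CustomImageProcessor` are the fixture classes imported at the top of this file;
# the helper name and the cleanup shape are illustrative, not part of the suite.
def _example_register_custom_image_processor():
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    try:
        image_processor = CustomImageProcessor()
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            # The auto class can now resolve the pair it was just taught about.
            reloaded = AutoImageProcessor.from_pretrained(tmp_dir)
            assert isinstance(reloaded, CustomImageProcessor)
    finally:
        # Clean the registries so the global mappings are unchanged for other callers.
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
            del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]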
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , collections.abc.Iterable ):
return x
return (x, x)
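# Illustrative behaviour of the helper above (used as `to_atuple` further down):
# iterables pass through unchanged, scalars are duplicated into a pair, e.g.
#   to_atuple(224)        -> (224, 224)
#   to_atuple((224, 196)) -> (224, 196)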
@require_flax
class a_ :
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
def _SCREAMING_SNAKE_CASE ( self : int ):
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : np.ndarray , snake_case__ : np.ndarray , snake_case__ : float ):
lowerCAmelCase__ = np.abs((a - b) ).max()
self.assertLessEqual(snake_case__ , snake_case__ , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : str , snake_case__ : Dict , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : str=None , **snake_case__ : Optional[Any] ):
lowerCAmelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel(snake_case__ )
lowerCAmelCase__ = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Optional[int]=None , **snake_case__ : Tuple ):
lowerCAmelCase__ , lowerCAmelCase__ = self.get_vision_text_model(snake_case__ , snake_case__ )
lowerCAmelCase__ = {"""vision_model""": vision_model, """text_model""": text_model}
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowerCAmelCase__ = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : str , snake_case__ : int=None , **snake_case__ : Optional[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.get_vision_text_model(snake_case__ , snake_case__ )
lowerCAmelCase__ = {"""vision_model""": vision_model, """text_model""": text_model}
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowerCAmelCase__ = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
lowerCAmelCase__ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
lowerCAmelCase__ = after_output[0]
lowerCAmelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1E-3 )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : List[str]=None , **snake_case__ : List[str] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.get_vision_text_model(snake_case__ , snake_case__ )
lowerCAmelCase__ = {"""vision_model""": vision_model, """text_model""": text_model}
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowerCAmelCase__ = model(
input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , output_attentions=snake_case__ )
lowerCAmelCase__ = output.vision_model_output.attentions
self.assertEqual(len(snake_case__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ = output.text_model_output.attentions
self.assertEqual(len(snake_case__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : int ):
pt_model.to(snake_case__ )
pt_model.eval()
# prepare inputs
lowerCAmelCase__ = inputs_dict
lowerCAmelCase__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCAmelCase__ = pt_model(**snake_case__ ).to_tuple()
lowerCAmelCase__ = fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ , from_pt=snake_case__ )
lowerCAmelCase__ = fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
lowerCAmelCase__ = VisionTextDualEncoderModel.from_pretrained(snake_case__ , from_flax=snake_case__ )
pt_model_loaded.to(snake_case__ )
pt_model_loaded.eval()
with torch.no_grad():
lowerCAmelCase__ = pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case__ , pt_output_loaded.numpy() , 4E-2 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ):
lowerCAmelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowerCAmelCase__ = VisionTextDualEncoderModel(snake_case__ )
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel(snake_case__ )
lowerCAmelCase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
lowerCAmelCase__ = fx_state
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Any , snake_case__ : Any , snake_case__ : Tuple ):
lowerCAmelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowerCAmelCase__ = VisionTextDualEncoderModel(snake_case__ )
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel(snake_case__ )
lowerCAmelCase__ = load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
self.check_save_load(**snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case__ )
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ = config_inputs_dict.pop("""vision_config""" )
lowerCAmelCase__ = config_inputs_dict.pop("""text_config""" )
lowerCAmelCase__ = config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case__ , snake_case__ , snake_case__ )
self.check_equivalence_flax_to_pt(snake_case__ , snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ = model_a(**snake_case__ )
lowerCAmelCase__ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case__ )
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = model_a(**snake_case__ )
lowerCAmelCase__ = after_outputs[0]
lowerCAmelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1E-5 )
@require_flax
class a_ ( __UpperCamelCase , unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
lowerCAmelCase__ = 13
lowerCAmelCase__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCAmelCase__ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowerCAmelCase__ = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : str , snake_case__ : Optional[int] ):
lowerCAmelCase__ = FlaxViTModel(snake_case__ )
lowerCAmelCase__ = FlaxBertModel(snake_case__ )
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = FlaxViTModelTester(self )
lowerCAmelCase__ = FlaxBertModelTester(self )
lowerCAmelCase__ = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class a_ ( __UpperCamelCase , unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
lowerCAmelCase__ = 13
lowerCAmelCase__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCAmelCase__ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowerCAmelCase__ = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Dict , snake_case__ : str ):
lowerCAmelCase__ = FlaxCLIPVisionModel(snake_case__ )
lowerCAmelCase__ = FlaxBertModel(snake_case__ )
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = FlaxCLIPVisionModelTester(self )
lowerCAmelCase__ = FlaxBertModelTester(self )
lowerCAmelCase__ = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class a_ ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
lowerCAmelCase__ = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase__ = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=snake_case__ , padding=snake_case__ , return_tensors="""np""" )
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , snake_case__ , atol=1E-3 ) )
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ):
lowerCAmelCase__ = np.random.default_rng(snake_case__ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[Any] ):
return self.length
def __getitem__( self : List[str] , snake_case__ : Optional[int] ):
return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ):
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ):
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
lowerCAmelCase__ = True
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ )
lowerCAmelCase__ = datasets["""train"""].unique("""label""" )
lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )}
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" )
if "label" in examples:
lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 )
lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
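# Hedged usage sketch for the utilities above, written against assumed original
# names: `RegressionDataset` for the Dataset class and `RegressionModel` for the
# second nn.Module (both appear under generic identifiers here). The optimizer
# choice and hyperparameters are illustrative assumptions, not from the source.
def _example_regression_loop():
    dataset = RegressionDataset(length=64)  # assumed name for the Dataset above
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel()  # assumed name for the second nn.Module above
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for batch in loader:
        optimizer.zero_grad()
        prediction = model(batch["x"])  # y_hat = a * x + b, broadcast over the batch
        loss = torch.nn.functional.mse_loss(prediction, batch["y"])
        loss.backward()
        optimizer.step()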
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
__lowerCAmelCase : Union[str, Any] = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def _UpperCAmelCase ( lowerCamelCase__ = "mumbai" ):
"""simple docstring"""
lowerCAmelCase__ = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# Each organic-job block on the results page lists the specifics of one posting
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
lowerCAmelCase__ = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
lowerCAmelCase__ = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"Job {i:>2} is {job[0]} at {job[1]}")
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ )
# Load weights from tf checkpoint
lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
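# Illustrative invocation (the script name and all paths are placeholders):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin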
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256 as shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__lowerCAmelCase : Dict = True
except ImportError:
__lowerCAmelCase : Dict = False
try:
from torch.hub import _get_torch_home
__lowerCAmelCase : Optional[Any] = _get_torch_home()
except ImportError:
__lowerCAmelCase : List[Any] = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
__lowerCAmelCase : Optional[Any] = os.path.join(torch_cache_home, "transformers")
__lowerCAmelCase : Any = "https://cdn.huggingface.co"
__lowerCAmelCase : Tuple = "https://s3.amazonaws.com/models.huggingface.co/bert"
__lowerCAmelCase : List[Any] = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
__lowerCAmelCase : Any = os.path.join(PATH, "config.yaml")
__lowerCAmelCase : Union[str, Any] = os.path.join(PATH, "attributes.txt")
__lowerCAmelCase : List[str] = os.path.join(PATH, "objects.txt")
__lowerCAmelCase : int = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
__lowerCAmelCase : Union[str, Any] = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
__lowerCAmelCase : Tuple = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
__lowerCAmelCase : Optional[Any] = "pytorch_model.bin"
__lowerCAmelCase : Any = "config.yaml"
def _UpperCAmelCase ( lowerCamelCase__=OBJECTS , lowerCamelCase__=ATTRIBUTES ):
"""simple docstring"""
lowerCAmelCase__ = []
with open(lowerCamelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
lowerCAmelCase__ = []
with open(lowerCamelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = OrderedDict()
with open(lowerCamelCase__ , """rb""" ) as f:
lowerCAmelCase__ = pkl.load(lowerCamelCase__ )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCAmelCase__ = ckp.pop(lowerCamelCase__ )
if isinstance(lowerCamelCase__ , np.ndarray ):
lowerCAmelCase__ = torch.tensor(lowerCamelCase__ )
else:
assert isinstance(lowerCamelCase__ , torch.Tensor ), type(lowerCamelCase__ )
lowerCAmelCase__ = v
return r
class a_ :
UpperCamelCase_ : List[str] = {}
def __init__( self : Tuple , snake_case__ : dict , snake_case__ : str = "root" , snake_case__ : int=0 ):
lowerCAmelCase__ = name
lowerCAmelCase__ = level
lowerCAmelCase__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCAmelCase__ = copy.deepcopy(snake_case__ )
lowerCAmelCase__ = copy.deepcopy(snake_case__ )
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ = Config(snake_case__ , name=snake_case__ , level=level + 1 )
lowerCAmelCase__ = v
setattr(self , snake_case__ , snake_case__ )
lowerCAmelCase__ = d
def __repr__( self : Optional[Any] ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Dict ):
lowerCAmelCase__ = val
lowerCAmelCase__ = val
lowerCAmelCase__ = key.split(""".""" )
lowerCAmelCase__ = len(snake_case__ ) - 1
lowerCAmelCase__ = self._pointer
if len(snake_case__ ) > 1:
for i, l in enumerate(snake_case__ ):
if hasattr(self , snake_case__ ) and isinstance(getattr(self , snake_case__ ) , snake_case__ ):
setattr(getattr(self , snake_case__ ) , """.""".join(levels[i:] ) , snake_case__ )
if l == last_level:
lowerCAmelCase__ = val
else:
lowerCAmelCase__ = pointer[l]
def _SCREAMING_SNAKE_CASE ( self : int ):
return self._pointer
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Dict , snake_case__ : Tuple ):
with open(F"""{file_name}""" , """w""" ) as stream:
dump(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : List[str] ):
with open(F"""{file_name}""" , """w""" ) as stream:
json.dump(snake_case__ , snake_case__ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : List[Any] ):
with open(snake_case__ ) as stream:
lowerCAmelCase__ = load(snake_case__ , Loader=snake_case__ )
return data
def __str__( self : Any ):
lowerCAmelCase__ = """ """
if self._name != "root":
lowerCAmelCase__ = F"""{t * (self._level-1)}{self._name}:\n"""
else:
lowerCAmelCase__ = """"""
lowerCAmelCase__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(snake_case__ , snake_case__ ):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(snake_case__ ).__name__})\n"""
lowerCAmelCase__ = level
return r[:-1]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any , snake_case__ : str , **snake_case__ : Optional[int] ):
lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(snake_case__ , **snake_case__ )
return cls(snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , snake_case__ : str , **snake_case__ : Optional[Any] ):
lowerCAmelCase__ = kwargs.pop("""cache_dir""" , snake_case__ )
lowerCAmelCase__ = kwargs.pop("""force_download""" , snake_case__ )
lowerCAmelCase__ = kwargs.pop("""resume_download""" , snake_case__ )
lowerCAmelCase__ = kwargs.pop("""proxies""" , snake_case__ )
lowerCAmelCase__ = kwargs.pop("""local_files_only""" , snake_case__ )
if os.path.isdir(snake_case__ ):
lowerCAmelCase__ = os.path.join(snake_case__ , snake_case__ )
elif os.path.isfile(snake_case__ ) or is_remote_url(snake_case__ ):
lowerCAmelCase__ = pretrained_model_name_or_path
else:
lowerCAmelCase__ = hf_bucket_url(snake_case__ , filename=snake_case__ , use_cdn=snake_case__ )
try:
# Load from URL or cache if already cached
lowerCAmelCase__ = cached_path(
snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCAmelCase__ = Config.load_yaml(snake_case__ )
except EnvironmentError:
lowerCAmelCase__ = """Can't load config for"""
raise EnvironmentError(snake_case__ )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(snake_case__ ), kwargs
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = torch.load("""dump.pt""" , map_location=in_tensor.device )
lowerCAmelCase__ = in_tensor.numpy()
lowerCAmelCase__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowerCamelCase__ , lowerCamelCase__ , rtol=0.01 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(lowerCamelCase__ , lowerCamelCase__ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = urlparse(lowerCamelCase__ )
return parsed.scheme in ("http", "https")
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ):
"""simple docstring"""
lowerCAmelCase__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCAmelCase__ = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=0 , lowerCamelCase__=None , ):
"""simple docstring"""
lowerCAmelCase__ = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
ua += "; " + "; ".join("""{}/{}""".format(lowerCamelCase__ , lowerCamelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
ua += "; " + user_agent
lowerCAmelCase__ = {"""user-agent""": ua}
if resume_size > 0:
lowerCAmelCase__ = """bytes=%d-""" % (resume_size,)
lowerCAmelCase__ = requests.get(lowerCamelCase__ , stream=lowerCamelCase__ , proxies=lowerCamelCase__ , headers=lowerCamelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCAmelCase__ = response.headers.get("""Content-Length""" )
lowerCAmelCase__ = resume_size + int(lowerCamelCase__ ) if content_length is not None else None
lowerCAmelCase__ = tqdm(
unit="""B""" , unit_scale=lowerCamelCase__ , total=lowerCamelCase__ , initial=lowerCamelCase__ , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCamelCase__ ) )
temp_file.write(lowerCamelCase__ )
progress.close()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=10 , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=False , ):
"""simple docstring"""
if cache_dir is None:
lowerCAmelCase__ = TRANSFORMERS_CACHE
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = str(lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowerCAmelCase__ = None
if not local_files_only:
try:
lowerCAmelCase__ = requests.head(lowerCamelCase__ , allow_redirects=lowerCamelCase__ , proxies=lowerCamelCase__ , timeout=lowerCamelCase__ )
if response.status_code == 200:
lowerCAmelCase__ = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCAmelCase__ = url_to_filename(lowerCamelCase__ , lowerCamelCase__ )
# get cache path to put the file
lowerCAmelCase__ = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
# etag is None means we don't have a connection, the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCamelCase__ ):
return cache_path
else:
lowerCAmelCase__ = [
file
for file in fnmatch.filter(os.listdir(lowerCamelCase__ ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(lowerCamelCase__ ) > 0:
return os.path.join(lowerCamelCase__ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(lowerCamelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCAmelCase__ = cache_path + """.lock"""
with FileLock(lowerCamelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCamelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCAmelCase__ = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(lowerCamelCase__ , """a+b""" ) as f:
yield f
lowerCAmelCase__ = _resumable_file_manager
if os.path.exists(lowerCamelCase__ ):
lowerCAmelCase__ = os.stat(lowerCamelCase__ ).st_size
else:
lowerCAmelCase__ = 0
else:
lowerCAmelCase__ = partial(tempfile.NamedTemporaryFile , dir=lowerCamelCase__ , delete=lowerCamelCase__ )
lowerCAmelCase__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , lowerCamelCase__ , temp_file.name , )
http_get(
lowerCamelCase__ , lowerCamelCase__ , proxies=lowerCamelCase__ , resume_size=lowerCamelCase__ , user_agent=lowerCamelCase__ , )
os.replace(temp_file.name , lowerCamelCase__ )
lowerCAmelCase__ = {"""url""": url, """etag""": etag}
lowerCAmelCase__ = cache_path + """.json"""
with open(lowerCamelCase__ , """w""" ) as meta_file:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
return cache_path
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
lowerCAmelCase__ = url.encode("""utf-8""" )
lowerCAmelCase__ = shaaaa(lowerCamelCase__ )
lowerCAmelCase__ = url_hash.hexdigest()
if etag:
lowerCAmelCase__ = etag.encode("""utf-8""" )
lowerCAmelCase__ = shaaaa(lowerCamelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
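# In short: the cache key built above is sha256(url), extended with
# "." + sha256(etag) when an ETag is known; a ".h5" suffix is preserved so
# downstream loaders can still detect HDF5 files by extension.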
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , ):
"""simple docstring"""
if cache_dir is None:
lowerCAmelCase__ = TRANSFORMERS_CACHE
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = str(lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = str(lowerCamelCase__ )
if is_remote_url(lowerCamelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCAmelCase__ = get_from_cache(
lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , proxies=lowerCamelCase__ , resume_download=lowerCamelCase__ , user_agent=lowerCamelCase__ , local_files_only=lowerCamelCase__ , )
elif os.path.exists(lowerCamelCase__ ):
# File, and it exists.
lowerCAmelCase__ = url_or_filename
elif urlparse(lowerCamelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(lowerCamelCase__ ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(lowerCamelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCamelCase__ ) and not tarfile.is_tarfile(lowerCamelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCAmelCase__ , lowerCAmelCase__ = os.path.split(lowerCamelCase__ )
lowerCAmelCase__ = output_file.replace(""".""" , """-""" ) + """-extracted"""
lowerCAmelCase__ = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if os.path.isdir(lowerCamelCase__ ) and os.listdir(lowerCamelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCAmelCase__ = output_path + """.lock"""
with FileLock(lowerCamelCase__ ):
shutil.rmtree(lowerCamelCase__ , ignore_errors=lowerCamelCase__ )
os.makedirs(lowerCamelCase__ )
if is_zipfile(lowerCamelCase__ ):
with ZipFile(lowerCamelCase__ , """r""" ) as zip_file:
zip_file.extractall(lowerCamelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCamelCase__ ):
lowerCAmelCase__ = tarfile.open(lowerCamelCase__ )
tar_file.extractall(lowerCamelCase__ )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(lowerCamelCase__ ) )
return output_path_extracted
return output_path
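# Hedged usage note for cached_path above: it resolves either a URL or a local
# path to a concrete file on disk, downloading into the cache dir when needed,
# e.g. (the URL is illustrative only):
#   config_file = cached_path("https://cdn.huggingface.co/unc-nlp/frcnn-vg-finetuned/config.yaml")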
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__="," ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if os.path.isfile(lowerCamelCase__ ):
with open(lowerCamelCase__ ) as f:
lowerCAmelCase__ = eval(f.read() )
else:
lowerCAmelCase__ = requests.get(lowerCamelCase__ )
try:
lowerCAmelCase__ = req.json()
except Exception:
lowerCAmelCase__ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCAmelCase__ = eval(lowerCamelCase__ )
except Exception:
lowerCAmelCase__ = data.split("""\n""" )
req.close()
return data
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = requests.get(lowerCamelCase__ )
lowerCAmelCase__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as stream:
lowerCAmelCase__ = pkl.load(lowerCamelCase__ )
lowerCAmelCase__ = weights.pop("""model""" )
lowerCAmelCase__ = {}
for k, v in model.items():
lowerCAmelCase__ = torch.from_numpy(lowerCamelCase__ )
if "running_var" in k:
lowerCAmelCase__ = torch.tensor([0] )
lowerCAmelCase__ = k.replace("""running_var""" , """num_batches_tracked""" )
lowerCAmelCase__ = zero
return new
def _UpperCAmelCase ( ):
"""simple docstring"""
print(f"""{os.path.abspath(os.path.join(lowerCamelCase__ , os.pardir ) )}/demo.ipynb""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__="RGB" ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if os.path.isfile(lowerCamelCase__ ):
lowerCAmelCase__ = cva.imread(lowerCamelCase__ )
else:
lowerCAmelCase__ = get_image_from_url(lowerCamelCase__ )
assert img is not None, f"""could not connect to: {im}"""
lowerCAmelCase__ = cva.cvtColor(lowerCamelCase__ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCAmelCase__ = img[:, :, ::-1]
return img
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=1 ):
"""simple docstring"""
return (images[i : i + batch] for i in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ ))
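# Illustrative behaviour of the generator above (plausibly `chunk`): it yields
# fixed-size slices, with a shorter final slice when the list does not divide evenly:
#   list(chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]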
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(lowerCamelCase__ )
else:
lowerCAmelCase__ = sylvester(number - 1 )
lowerCAmelCase__ = num - 1
lowerCAmelCase__ = num
return lower * upper + 1
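# Worked recurrence: with lower = a(n-1) - 1 and upper = a(n-1), the return value is
# a(n) = a(n-1)**2 - a(n-1) + 1, which from a(1) = 2 yields
# 2, 3, 7, 43, 1807, 3263443, ... (Sylvester's sequence).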
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
while b:
lowerCAmelCase__ , lowerCAmelCase__ = b, a % b
return a
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b )
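# Worked trace (both variants compute the same value): gcd(48, 18)
#   (a, b): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> result 6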
def _UpperCAmelCase ( ):
"""simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = PegasusTokenizer
UpperCamelCase_ : Any = PegasusTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(snake_case__ ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
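    # Reading the asserts above: the raw SentencePiece ids are shifted up by
    # `offset` (103) to reserve the low ids for the pad/eos/mask specials, so
    # vocab_size == 96000 + 103 and the unknown token lands at offset + 2 == 105.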
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = PegasusTokenizer
UpperCamelCase_ : Optional[int] = PegasusTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__lowerCAmelCase : str = logging.getLogger()
__lowerCAmelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class a_ ( __UpperCamelCase ):
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ):
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase__ = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase__ = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase__ = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case__ , F"""{split}.{field}""" ) , """w""" ) as f:
f.write(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str = "pytorch" ):
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = os.path.join(snake_case__ , """output""" )
lowerCAmelCase__ = os.path.join(snake_case__ , """data""" )
self._create_dummy_data(data_dir=snake_case__ )
lowerCAmelCase__ = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase__ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case__ , env=self.get_env() )
lowerCAmelCase__ = os.path.join(snake_case__ , """metrics.json""" )
with open(snake_case__ ) as f:
lowerCAmelCase__ = json.load(snake_case__ )
return result
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 674 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , )
assert hasattr(self , """env""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
        # configuration for running training with SageMaker's smdistributed model parallelism
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump test results into a json file to share in the PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = KandinskyVaaInpaintPipeline
UpperCamelCase_ : List[str] = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCamelCase_ : Dict = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCamelCase_ : List[str] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCamelCase_ : Dict = False
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Any ):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : str ):
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCAmelCase__ = UNetaDConditionModel(**snake_case__ )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=snake_case__ , )
lowerCAmelCase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] , snake_case__ : Optional[Any]=0 ):
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(snake_case__ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowerCAmelCase__ = np.ones((64, 64) , dtype=np.floataa )
lowerCAmelCase__ = 0
if str(snake_case__ ).startswith("""mps""" ):
lowerCAmelCase__ = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase__ = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = """cpu"""
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**snake_case__ )
lowerCAmelCase__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCAmelCase__ = np.ones((768, 768) , dtype=np.floataa )
lowerCAmelCase__ = 0
lowerCAmelCase__ = """a hat"""
lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowerCAmelCase__ = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowerCAmelCase__ = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
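# A standalone sketch of the mask construction used above. The convention is
# an assumption worth stating explicitly: in these tests 1 marks pixels to
# keep and 0 marks the region the pipeline should repaint.
def _make_inpaint_mask(height: int = 64 , width: int = 64 ):
    mask = np.ones((height, width) , dtype=np.float32 )
    mask[: height // 4, :] = 0  # repaint the top quarter, purely for illustration
    return mask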
| 674 | """simple docstring"""
from math import pi, sqrt
def gamma(num: float ) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma() -> None:
    """simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
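# A quick cross-check sketch (added for illustration): for positive integers
# gamma(n) must equal (n - 1)!, and math.gamma provides an independent
# floating-point reference for the half-integer branch.
import math
def _gamma_cross_check() -> None:
    for n in range(1 , 10 ):
        assert gamma(n ) == math.factorial(n - 1 )
        assert math.isclose(gamma(n + 0.5 ) , math.gamma(n + 0.5 ) )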
if __name__ == "__main__":
from doctest import testmod
testmod()
    num: float = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 674 | 1 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : str = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class a_ :
def __init__( self : Any , snake_case__ : Any=None , **snake_case__ : Dict ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
lowerCAmelCase__ = model
lowerCAmelCase__ = kwargs.get("""model_save_dir""" , snake_case__ )
lowerCAmelCase__ = kwargs.get("""latest_model_name""" , snake_case__ )
def __call__( self : Tuple , **snake_case__ : List[Any] ):
lowerCAmelCase__ = {k: np.array(snake_case__ ) for k, v in kwargs.items()}
return self.model.run(snake_case__ , snake_case__ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : Union[str, Path] , snake_case__ : Optional[Any]=None , snake_case__ : Union[str, Any]=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
lowerCAmelCase__ = """CPUExecutionProvider"""
return ort.InferenceSession(snake_case__ , providers=[provider] , sess_options=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Path] , snake_case__ : Optional[str] = None , **snake_case__ : str ):
lowerCAmelCase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCAmelCase__ = self.model_save_dir.joinpath(self.latest_model_name )
lowerCAmelCase__ = Path(snake_case__ ).joinpath(snake_case__ )
try:
shutil.copyfile(snake_case__ , snake_case__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCAmelCase__ = self.model_save_dir.joinpath(snake_case__ )
if src_path.exists():
lowerCAmelCase__ = Path(snake_case__ ).joinpath(snake_case__ )
try:
shutil.copyfile(snake_case__ , snake_case__ )
except shutil.SameFileError:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Dict , ):
if os.path.isfile(snake_case__ ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
# saving model weights/files
self._save_pretrained(snake_case__ , **snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] , snake_case__ : Union[str, Path] , snake_case__ : Optional[Union[bool, str, None]] = None , snake_case__ : Optional[Union[str, None]] = None , snake_case__ : bool = False , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional["ort.SessionOptions"] = None , **snake_case__ : List[Any] , ):
lowerCAmelCase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(snake_case__ ):
lowerCAmelCase__ = OnnxRuntimeModel.load_model(
os.path.join(snake_case__ , snake_case__ ) , provider=snake_case__ , sess_options=snake_case__ )
lowerCAmelCase__ = Path(snake_case__ )
# load model from hub
else:
# download model
lowerCAmelCase__ = hf_hub_download(
repo_id=snake_case__ , filename=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , )
lowerCAmelCase__ = Path(snake_case__ ).parent
lowerCAmelCase__ = Path(snake_case__ ).name
lowerCAmelCase__ = OnnxRuntimeModel.load_model(snake_case__ , provider=snake_case__ , sess_options=snake_case__ )
return cls(model=snake_case__ , **snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , snake_case__ : Union[str, Path] , snake_case__ : bool = True , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , **snake_case__ : int , ):
lowerCAmelCase__ = None
if len(str(snake_case__ ).split("""@""" ) ) == 2:
lowerCAmelCase__ , lowerCAmelCase__ = model_id.split("""@""" )
return cls._from_pretrained(
model_id=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , use_auth_token=snake_case__ , **snake_case__ , )
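# A minimal usage sketch for the wrapper above (the model path, provider
# choice and input shape are illustrative assumptions): load an exported
# .onnx file and run one numpy batch through onnxruntime directly.
def _run_onnx_once(path: str = "model.onnx" ):
    session = ort.InferenceSession(path , providers=["""CPUExecutionProvider"""] )
    input_name = session.get_inputs()[0].name  # name of the first graph input
    dummy = np.zeros((1, 3, 224, 224) , dtype=np.float32 )
    return session.run(None , {input_name: dummy} )[0]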
| 674 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 2
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
lowerCAmelCase__ = TFDeiTModel(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ):
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ):
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Any = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TFDeiTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ):
lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
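# A generic sketch of the integration-check pattern above (model, processor,
# image and reference slice are passed in, so nothing is assumed beyond the
# same 1e-4 tolerance): run one image end-to-end and compare a logits slice.
def _logits_match(model , image_processor , image , expected_slice ) -> bool:
    inputs = image_processor(images=image , return_tensors="""tf""" )
    logits = model(**inputs ).logits
    return bool(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )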
| 674 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__lowerCAmelCase : int = logging.get_logger(__name__)
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
if "." in tensor_name:
lowerCAmelCase__ = tensor_name.split(""".""" )
for split in splits[:-1]:
lowerCAmelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
lowerCAmelCase__ = new_module
lowerCAmelCase__ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
lowerCAmelCase__ = tensor_name in module._buffers
lowerCAmelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if is_buffer or not is_bitsandbytes_available():
lowerCAmelCase__ = False
lowerCAmelCase__ = False
else:
lowerCAmelCase__ = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowerCAmelCase__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowerCAmelCase__ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowerCAmelCase__ = old_value.to(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , torch.Tensor ):
lowerCAmelCase__ = value.to("""cpu""" )
if value.dtype == torch.inta:
lowerCAmelCase__ = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
lowerCAmelCase__ = torch.tensor(lowerCamelCase__ , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowerCamelCase__ ) and fpaa_statistics is None:
lowerCAmelCase__ = new_value.T
lowerCAmelCase__ = old_value.__dict__
if is_abit:
lowerCAmelCase__ = bnb.nn.IntaParams(lowerCamelCase__ , requires_grad=lowerCamelCase__ , **lowerCamelCase__ ).to(lowerCamelCase__ )
elif is_abit:
lowerCAmelCase__ = bnb.nn.Paramsabit(lowerCamelCase__ , requires_grad=lowerCamelCase__ , **lowerCamelCase__ ).to(lowerCamelCase__ )
lowerCAmelCase__ = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(lowerCamelCase__ ) )
else:
if value is None:
lowerCAmelCase__ = old_value.to(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , torch.Tensor ):
lowerCAmelCase__ = value.to(lowerCamelCase__ )
else:
lowerCAmelCase__ = torch.tensor(lowerCamelCase__ , device=lowerCamelCase__ )
if is_buffer:
lowerCAmelCase__ = new_value
else:
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ , requires_grad=old_value.requires_grad )
lowerCAmelCase__ = new_value
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase__ = []
current_key_name.append(lowerCamelCase__ )
if (isinstance(lowerCamelCase__ , nn.Linear ) or isinstance(lowerCamelCase__ , lowerCamelCase__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(lowerCamelCase__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ , lowerCAmelCase__ = module.weight.shape
else:
lowerCAmelCase__ = module.in_features
lowerCAmelCase__ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowerCAmelCase__ = bnb.nn.LinearabitLt(
lowerCamelCase__ , lowerCamelCase__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowerCAmelCase__ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowerCAmelCase__ = bnb.nn.Linearabit(
lowerCamelCase__ , lowerCamelCase__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowerCAmelCase__ = True
# Store the module class in case we need to transpose the weight later
lowerCAmelCase__ = type(lowerCamelCase__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowerCamelCase__ )
if len(list(module.children() ) ) > 0:
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_linear(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_been_replaced=lowerCamelCase__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
lowerCAmelCase__ = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_linear(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _UpperCAmelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , lowerCamelCase__ , )
return replace_with_bnb_linear(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCAmelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , lowerCamelCase__ , )
return set_module_quantized_tensor_to_device(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = deepcopy(lowerCamelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowerCAmelCase__ = find_tied_parameters(lowerCamelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase__ = sum(lowerCamelCase__ , [] )
lowerCAmelCase__ = len(lowerCamelCase__ ) > 0
# Check if it is a base model
lowerCAmelCase__ = not hasattr(lowerCamelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase__ = list(model.named_children() )
lowerCAmelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase__ = set(lowerCamelCase__ ) - set(lowerCamelCase__ )
lowerCAmelCase__ = list(set(lowerCamelCase__ ) ) + list(lowerCamelCase__ )
# remove ".weight" from the keys
lowerCAmelCase__ = [""".weight""", """.bias"""]
lowerCAmelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase__ = name.replace(lowerCamelCase__ , """""" )
filtered_module_names.append(lowerCamelCase__ )
return filtered_module_names
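# A stripped-down sketch of the recursive swap implemented above, using
# torch.nn.Identity as a stand-in for the quantized layer so the traversal
# logic is visible on its own: replace matching Linear children in-place via
# _modules and recurse into every other submodule.
import torch.nn as nn
def _replace_linears_sketch(model: nn.Module , skip: set ) -> nn.Module:
    for name, module in model.named_children():
        if isinstance(module , nn.Linear ) and name not in skip:
            model._modules[name] = nn.Identity()  # stand-in for the bnb layer
        elif len(list(module.children() ) ) > 0:
            _replace_linears_sketch(module , skip )
    return model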
| 674 | """simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho( num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
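# A worked check (8051 = 83 * 97 is the classic textbook input for this
# method): with the default seed and step, the gcd of the tortoise/hare
# difference exposes a factor within the first attempt.
def _pollard_rho_demo() -> None:
    divisor = pollard_rho(8051 )
    assert divisor is not None and 8051 % divisor == 0  # one of 83 or 97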
if __name__ == "__main__":
import argparse
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
__lowerCAmelCase : List[str] = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | 1 |
"""simple docstring"""
import os
from math import log10
def solution(data_file: str = "base_exp.txt" ) -> int:
    """simple docstring"""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a, x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
return result
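# A tiny worked check of the logarithm trick used above: b * log10(a)
# compares a**b values without materialising the huge powers. For example
# 11 * log10(2) < 7 * log10(3), i.e. 2**11 = 2048 < 3**7 = 2187.
def _log_trick_demo() -> None:
    assert 11 * log10(2 ) < 7 * log10(3 )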
if __name__ == "__main__":
print(solution())
| 674 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ )
# set absolute/relative position embeddings parameter
lowerCAmelCase__ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = True
# hparam_utils.py hparams
lowerCAmelCase__ = 0.66_46_94
lowerCAmelCase__ = 0.20_79_51
lowerCAmelCase__ = 0.12_11_94
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 0.0_35_25_13
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = False
# hparam_utils.py hparams
lowerCAmelCase__ = 36.45_19
lowerCAmelCase__ = 0.90_34_21
lowerCAmelCase__ = 2_22.0_88
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 0.76_31_41
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "TABFACT":
lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ )
elif task == "MLM":
lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(lowerCamelCase__ )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
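# An illustrative invocation sketch (the script filename and every path are
# placeholder assumptions, not real files):
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output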
| 674 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = "roc_bert"
def __init__( self : List[str] , snake_case__ : int=30522 , snake_case__ : Tuple=768 , snake_case__ : Union[str, Any]=12 , snake_case__ : Any=12 , snake_case__ : int=3072 , snake_case__ : List[Any]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Any=0.1 , snake_case__ : List[str]=512 , snake_case__ : Any=2 , snake_case__ : List[str]=0.02 , snake_case__ : Optional[int]=1E-12 , snake_case__ : Dict=True , snake_case__ : List[Any]=0 , snake_case__ : List[Any]="absolute" , snake_case__ : int=None , snake_case__ : Optional[Any]=True , snake_case__ : Any=True , snake_case__ : str=768 , snake_case__ : Union[str, Any]=910 , snake_case__ : Any=512 , snake_case__ : Tuple=24858 , snake_case__ : Optional[Any]=True , **snake_case__ : Union[str, Any] , ):
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = enable_pronunciation
lowerCAmelCase__ = enable_shape
lowerCAmelCase__ = pronunciation_embed_dim
lowerCAmelCase__ = pronunciation_vocab_size
lowerCAmelCase__ = shape_embed_dim
lowerCAmelCase__ = shape_vocab_size
lowerCAmelCase__ = concat_input
lowerCAmelCase__ = position_embedding_type
lowerCAmelCase__ = classifier_dropout
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
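# A minimal usage sketch, assuming the upstream export name RoCBertConfig in
# transformers (an assumption; the class above carries a local name here):
#   from transformers import RoCBertConfig
#   config = RoCBertConfig(hidden_size=64, num_hidden_layers=2)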
| 674 | """simple docstring"""
def solution(length: int = 50 ) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
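# A quick sanity check against the known Project Euler 116 example: a row of
# length five admits exactly twelve valid tilings across the three tile sizes
# (7 ways with length-2 tiles, 3 with length-3, 2 with length-4).
def _tiling_demo() -> None:
    assert solution(5 ) == 12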
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=3 , snake_case__ : Tuple=18 , snake_case__ : Dict=30 , snake_case__ : List[str]=400 , snake_case__ : Union[str, Any]=True , snake_case__ : int=None , snake_case__ : int=True , ):
lowerCAmelCase__ = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = apply_ocr
def _SCREAMING_SNAKE_CASE ( self : str ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = LayoutLMvaImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case__ , """size""" ) )
self.assertTrue(hasattr(snake_case__ , """apply_ocr""" ) )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _SCREAMING_SNAKE_CASE ( self : str ):
pass
def _SCREAMING_SNAKE_CASE ( self : Dict ):
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , snake_case__ )
self.assertIsInstance(encoding.boxes , snake_case__ )
# Test batched
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : int ):
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        # with apply_ocr = True
lowerCAmelCase__ = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
lowerCAmelCase__ = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
lowerCAmelCase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case__ )
self.assertListEqual(encoding.boxes , snake_case__ )
        # with apply_ocr = False
lowerCAmelCase__ = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 674 | """simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowerCAmelCase__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase__ = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
lowerCAmelCase__ = nlp.model.BERTModel(
lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , )
original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
lowerCAmelCase__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase__ = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ )
lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = hf_param.shape
lowerCAmelCase__ = to_torch(params[gluon_param] )
lowerCAmelCase__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase__ = layer.attention.self
lowerCAmelCase__ = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowerCAmelCase__ = layer.attention.output
lowerCAmelCase__ = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowerCAmelCase__ = layer.intermediate
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowerCAmelCase__ = layer.output
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""]
# Get gluon output
lowerCAmelCase__ = mx.nd.array([input_ids] )
lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase__ )
lowerCAmelCase__ = BertModel.from_pretrained(lowerCamelCase__ )
hf_bort_model.eval()
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0]
lowerCAmelCase__ = output_gluon[0].asnumpy()
lowerCAmelCase__ = output_hf[0].detach().numpy()
lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : str = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
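# Hypothetical invocation sketch (the two flags match the argparse options
# defined above; the script name and paths are placeholders, not from the source):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch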
| 674 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a_ ( nn.Module ):
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int=0.0 , snake_case__ : Optional[int] = None , snake_case__ : str = "geglu" , snake_case__ : Optional[int] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : str = "layer_norm" , snake_case__ : bool = False , ):
super().__init__()
lowerCAmelCase__ = only_cross_attention
lowerCAmelCase__ = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
lowerCAmelCase__ = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase__ = AdaLayerNorm(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase__ = AdaLayerNormZero(snake_case__ , snake_case__ )
else:
lowerCAmelCase__ = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
lowerCAmelCase__ = Attention(
query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCAmelCase__ = (
AdaLayerNorm(snake_case__ , snake_case__ )
if self.use_ada_layer_norm
else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
)
lowerCAmelCase__ = Attention(
query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = None
# 3. Feed-forward
lowerCAmelCase__ = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
lowerCAmelCase__ = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ )
# let chunk size default to None
lowerCAmelCase__ = None
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : int ):
# Sets chunk feed-forward
lowerCAmelCase__ = chunk_size
lowerCAmelCase__ = dim
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Dict[str, Any] = None , snake_case__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCAmelCase__ = self.norma(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.norma(
snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype )
else:
lowerCAmelCase__ = self.norma(snake_case__ )
lowerCAmelCase__ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCAmelCase__ = self.attna(
snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , )
if self.use_ada_layer_norm_zero:
lowerCAmelCase__ = gate_msa.unsqueeze(1 ) * attn_output
lowerCAmelCase__ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCAmelCase__ = (
self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ )
)
lowerCAmelCase__ = self.attna(
snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , )
lowerCAmelCase__ = attn_output + hidden_states
# 3. Feed-forward
lowerCAmelCase__ = self.norma(snake_case__ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase__ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
lowerCAmelCase__ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCAmelCase__ = torch.cat(
[self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCAmelCase__ = self.ff(snake_case__ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase__ = gate_mlp.unsqueeze(1 ) * ff_output
lowerCAmelCase__ = ff_output + hidden_states
return hidden_states
class a_ ( nn.Module ):
def __init__( self : str , snake_case__ : int , snake_case__ : Optional[int] = None , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : str = "geglu" , snake_case__ : bool = False , ):
super().__init__()
lowerCAmelCase__ = int(dim * mult )
lowerCAmelCase__ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCAmelCase__ = GELU(snake_case__ , snake_case__ )
if activation_fn == "gelu-approximate":
lowerCAmelCase__ = GELU(snake_case__ , snake_case__ , approximate="""tanh""" )
elif activation_fn == "geglu":
lowerCAmelCase__ = GEGLU(snake_case__ , snake_case__ )
elif activation_fn == "geglu-approximate":
lowerCAmelCase__ = ApproximateGELU(snake_case__ , snake_case__ )
lowerCAmelCase__ = nn.ModuleList([] )
# project in
self.net.append(snake_case__ )
# project dropout
self.net.append(nn.Dropout(snake_case__ ) )
# project out
self.net.append(nn.Linear(snake_case__ , snake_case__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(snake_case__ ) )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Dict ):
for module in self.net:
lowerCAmelCase__ = module(snake_case__ )
return hidden_states
class a_ ( nn.Module ):
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : str = "none" ):
super().__init__()
lowerCAmelCase__ = nn.Linear(snake_case__ , snake_case__ )
lowerCAmelCase__ = approximate
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : str ):
if gate.device.type != "mps":
return F.gelu(snake_case__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : List[Any] ):
lowerCAmelCase__ = self.proj(snake_case__ )
lowerCAmelCase__ = self.gelu(snake_case__ )
return hidden_states
class a_ ( nn.Module ):
def __init__( self : int , snake_case__ : int , snake_case__ : int ):
super().__init__()
lowerCAmelCase__ = nn.Linear(snake_case__ , dim_out * 2 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : int ):
if gate.device.type != "mps":
return F.gelu(snake_case__ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Any ):
lowerCAmelCase__ , lowerCAmelCase__ = self.proj(snake_case__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(snake_case__ )
class a_ ( nn.Module ):
def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ):
super().__init__()
lowerCAmelCase__ = nn.Linear(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : List[Any] ):
lowerCAmelCase__ = self.proj(snake_case__ )
return x * torch.sigmoid(1.702 * x )
class a_ ( nn.Module ):
def __init__( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
super().__init__()
lowerCAmelCase__ = nn.Embedding(snake_case__ , snake_case__ )
lowerCAmelCase__ = nn.SiLU()
lowerCAmelCase__ = nn.Linear(snake_case__ , embedding_dim * 2 )
lowerCAmelCase__ = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] , snake_case__ : Optional[Any] ):
lowerCAmelCase__ = self.linear(self.silu(self.emb(snake_case__ ) ) )
lowerCAmelCase__ , lowerCAmelCase__ = torch.chunk(snake_case__ , 2 )
lowerCAmelCase__ = self.norm(snake_case__ ) * (1 + scale) + shift
return x
class a_ ( nn.Module ):
def __init__( self : List[str] , snake_case__ : int , snake_case__ : List[Any] ):
super().__init__()
lowerCAmelCase__ = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ )
lowerCAmelCase__ = nn.SiLU()
lowerCAmelCase__ = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ )
lowerCAmelCase__ = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1E-6 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict=None ):
lowerCAmelCase__ = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = emb.chunk(6 , dim=1 )
lowerCAmelCase__ = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a_ ( nn.Module ):
def __init__( self : str , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[str] = None , snake_case__ : float = 1E-5 ):
super().__init__()
lowerCAmelCase__ = num_groups
lowerCAmelCase__ = eps
if act_fn is None:
lowerCAmelCase__ = None
else:
lowerCAmelCase__ = get_activation(snake_case__ )
lowerCAmelCase__ = nn.Linear(snake_case__ , out_dim * 2 )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : List[str] , snake_case__ : Tuple ):
if self.act:
lowerCAmelCase__ = self.act(snake_case__ )
lowerCAmelCase__ = self.linear(snake_case__ )
lowerCAmelCase__ = emb[:, :, None, None]
lowerCAmelCase__ , lowerCAmelCase__ = emb.chunk(2 , dim=1 )
lowerCAmelCase__ = F.group_norm(snake_case__ , self.num_groups , eps=self.eps )
lowerCAmelCase__ = x * (1 + scale) + shift
return x
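# A minimal sketch of the feed-forward chunking used by the transformer block
# above: applying the FF to slices of the chunked dimension keeps only one
# slice's activations live at a time, lowering peak memory. The helper below is
# illustrative and not part of this module; it relies on the `torch` import at
# the top of the file.
def chunked_feed_forward(ff, hidden_states, chunk_size, dim=1):
    # Assumes exact divisibility, as validated in the block's forward pass above.
    num_chunks = hidden_states.shape[dim] // chunk_size
    return torch.cat(
        [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=dim)], dim=dim
    )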
| 674 | """simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self : Optional[int] ):
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
        lowerCAmelCase__ = cv2.imread(snake_case__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
lowerCAmelCase__ = np.sum(snake_case__ )
for i in range(len(snake_case__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
            lowerCAmelCase__ = last % 1  # fractional part of the scaled CDF decides the rounding below
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""" , self.img )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    __lowerCAmelCase : Dict = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
__lowerCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
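# A compact numpy sketch of the same mapping the class builds incrementally:
# s_k = (L - 1) * CDF(k), i.e. scale the cumulative normalized histogram into a
# lookup table. Illustrative only; it relies on the `numpy` import above and is
# not used by the class.
def equalize_histogram(img, levels=256):
    hist = np.bincount(img.ravel(), minlength=levels)  # per-intensity pixel counts
    cdf = np.cumsum(hist) / hist.sum()  # normalized cumulative distribution
    lut = np.round((levels - 1) * cdf).astype(img.dtype)  # intensity lookup table
    return lut[img]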
| 674 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : Tuple = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
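# Minimal sketch of the lazy-import pattern that `_LazyModule` relies on (an
# illustration only, not transformers' actual implementation): attribute access
# triggers the import of the owning submodule, so heavy backends load on demand.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        # Import lazily on first access, then delegate to the real object.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)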
| 674 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined"
UpperCamelCase_ : Any = "image_segmenter"
UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation
UpperCamelCase_ : List[str] = ["image", "text"]
UpperCamelCase_ : int = ["image"]
def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ):
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ):
with torch.no_grad():
lowerCAmelCase__ = self.model(**snake_case__ ).logits
return logits
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ):
lowerCAmelCase__ = outputs.cpu().detach().numpy()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
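# Hedged usage sketch (the class above is ImageSegmentationTool in the real
# transformers tools API; PipelineTool instances are callable, and `image` is a
# placeholder PIL.Image loaded elsewhere):
#
#   tool = ImageSegmentationTool()
#   mask = tool(image, "the cat")  # a PIL mask image, as produced by decode() above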
| 674 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class a_ ( __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Dict = 1
@register_to_config
def __init__( self : Optional[int] , snake_case__ : int = 2000 , snake_case__ : float = 0.15 , snake_case__ : float = 0.01 , snake_case__ : float = 1348.0 , snake_case__ : float = 1E-5 , snake_case__ : int = 1 , ):
# standard deviation of the initial noise distribution
lowerCAmelCase__ = sigma_max
# setable values
lowerCAmelCase__ = None
self.set_sigmas(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None ):
return sample
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : int , snake_case__ : float = None , snake_case__ : Union[str, torch.device] = None ):
lowerCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCAmelCase__ = torch.linspace(1 , snake_case__ , snake_case__ , device=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : int , snake_case__ : float = None , snake_case__ : float = None , snake_case__ : float = None ):
lowerCAmelCase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowerCAmelCase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowerCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(snake_case__ , snake_case__ )
lowerCAmelCase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCAmelCase__ = torch.exp(torch.linspace(math.log(snake_case__ ) , math.log(snake_case__ ) , snake_case__ ) )
lowerCAmelCase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : int , snake_case__ : int ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.Generator] = None , snake_case__ : bool = True , ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
lowerCAmelCase__ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCAmelCase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCAmelCase__ = timesteps.to(self.discrete_sigmas.device )
lowerCAmelCase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowerCAmelCase__ = self.get_adjacent_sigma(snake_case__ , snake_case__ ).to(sample.device )
lowerCAmelCase__ = torch.zeros_like(snake_case__ )
lowerCAmelCase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCAmelCase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCAmelCase__ = diffusion.unsqueeze(-1 )
lowerCAmelCase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCAmelCase__ = randn_tensor(
sample.shape , layout=sample.layout , generator=snake_case__ , device=sample.device , dtype=sample.dtype )
lowerCAmelCase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCAmelCase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=snake_case__ , prev_sample_mean=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.Generator] = None , snake_case__ : bool = True , ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCAmelCase__ = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case__ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCAmelCase__ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase__ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCAmelCase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCAmelCase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCAmelCase__ = step_size.unsqueeze(-1 )
lowerCAmelCase__ = sample + step_size * model_output
lowerCAmelCase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCAmelCase__ = timesteps.to(original_samples.device )
lowerCAmelCase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCAmelCase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(snake_case__ ) * sigmas[:, None, None, None]
)
lowerCAmelCase__ = noise + original_samples
return noisy_samples
def __len__( self : List[Any] ):
return self.config.num_train_timesteps
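# Hedged sketch of the predictor-corrector sampling loop this scheduler drives
# (model, shape, and step counts are placeholders; loosely mirrors diffusers'
# ScoreSdeVePipeline at a high level):
#
#   sample = sigma_max * torch.randn(shape)
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(num_correction_steps):                # corrector (Langevin dynamics)
#           score = model(sample, t).sample
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = model(sample, t).sample                      # predictor (reverse-time SDE)
#       out = scheduler.step_pred(score, t, sample)
#       sample, sample_mean = out.prev_sample, out.prev_sample_mean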
| 674 | """simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
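# Minimal greedy longest-match-first WordPiece sketch matching the expectation
# above ("unwanted" -> un, ##want, ##ed). Illustrative only: it covers just the
# subword step, after basic lowercasing/accent normalization has been applied.
def wordpiece(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                tokens.append(piece)
                start = end
                break
            end -= 1
        else:  # no subword matched: the whole word maps to the unknown token
            return [unk_token]
    return tokens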
| 674 | 1 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : str = TypeVar("DatasetType", Dataset, IterableDataset)
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(lowerCamelCase__ ):
if not isinstance(lowerCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(lowerCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(lowerCamelCase__ )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowerCamelCase__ ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase__ ).__name__}.""" )
if i == 0:
lowerCAmelCase__ , lowerCAmelCase__ = (
(Dataset, IterableDataset) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , info=lowerCamelCase__ , split=lowerCamelCase__ , stopping_strategy=lowerCamelCase__ )
else:
return _interleave_iterable_datasets(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , info=lowerCamelCase__ , split=lowerCamelCase__ , stopping_strategy=lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(lowerCamelCase__ ):
if not isinstance(lowerCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(lowerCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(lowerCamelCase__ )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowerCamelCase__ ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase__ ).__name__}.""" )
if i == 0:
lowerCAmelCase__ , lowerCAmelCase__ = (
(Dataset, IterableDataset) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowerCamelCase__ , info=lowerCamelCase__ , split=lowerCamelCase__ , axis=lowerCamelCase__ )
else:
return _concatenate_iterable_datasets(lowerCamelCase__ , info=lowerCamelCase__ , split=lowerCamelCase__ , axis=lowerCamelCase__ )
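# Hedged usage sketch of the two combinators above (their public names in the
# real `datasets` API are interleave_datasets / concatenate_datasets):
#
#   from datasets import Dataset, concatenate_datasets, interleave_datasets
#   d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
#   d2 = Dataset.from_dict({"text": ["x", "y"]})
#   combined = concatenate_datasets([d1, d2])  # 5 rows: d1 then d2
#   mixed = interleave_datasets(
#       [d1, d2], probabilities=[0.7, 0.3], seed=0, stopping_strategy="all_exhausted"
#   )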
| 674 | """simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Any = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
def __init__( self : List[str] , snake_case__ : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
lowerCAmelCase__ = primes[group]["""prime"""]
lowerCAmelCase__ = primes[group]["""generator"""]
lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
return hex(self.__private_key )[2:]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime )
return hex(snake_case__ )[2:]
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
if not self.is_valid_public_key(snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime )
        return sha256(str(snake_case__ ).encode() ).hexdigest()
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1
)
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ )
        return sha256(str(snake_case__ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
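    # Self-contained sketch of the exchange the class implements, with the MODP
    # group replaced by a small demo prime for brevity -- never use a small
    # prime in practice.
    from secrets import randbelow
    demo_prime, demo_generator = 0xFFFFFFFB, 2  # 2**32 - 5 (prime), illustrative only
    alice_priv = randbelow(demo_prime - 3) + 2
    bob_priv = randbelow(demo_prime - 3) + 2
    alice_pub = pow(demo_generator, alice_priv, demo_prime)
    bob_pub = pow(demo_generator, bob_priv, demo_prime)
    # Both sides derive g**(a*b) mod p, so the shared secrets agree.
    assert pow(bob_pub, alice_priv, demo_prime) == pow(alice_pub, bob_priv, demo_prime)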
| 674 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : str = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Set the weights of a whole torch Reformer block from trax weights."""
    # layer norm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Set all weights of the torch Reformer model from the trax weight tree."""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a trax Reformer pickle checkpoint into a PyTorch checkpoint."""
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
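# Illustrative invocation (script name and paths are assumed, not from this file;
# the flags mirror the argparse setup above):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_trax.pkl \
#       --config_file ./reformer_config.json \
#       --pytorch_dump_path ./pytorch_model.bin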
| 674 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (  # module name assumed: "elia_utils" appears to be a corrupted form of eli5_utils
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the retrieval and generation models (cached across streamlit reruns)."""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the dense faiss index over wiki40b passages and the ElasticSearch client."""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training set and the faiss index over its question embeddings."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the n_results nearest ELI5 training questions to the given question."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__="wiki40b" , lowerCamelCase__="dense" , lowerCamelCase__=10 ):
"""simple docstring"""
if source == "none":
lowerCAmelCase__ , lowerCAmelCase__ = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase__ , lowerCAmelCase__ = query_qa_dense_index(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
lowerCAmelCase__ , lowerCAmelCase__ = query_es_index(
lowerCamelCase__ , lowerCamelCase__ , index_name="""english_wiki40b_snippets_100w""" , n_results=lowerCamelCase__ , )
lowerCAmelCase__ = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
lowerCAmelCase__ = """question: {} context: {}""".format(lowerCamelCase__ , lowerCamelCase__ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate an answer from the question + support document (top_k=None is assumed; the original value was mangled)."""
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__lowerCAmelCase : Tuple = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__lowerCAmelCase : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowerCAmelCase : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__lowerCAmelCase : Tuple = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
"",
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
__lowerCAmelCase : Any = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__lowerCAmelCase : List[Any] = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__lowerCAmelCase : str = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__lowerCAmelCase : Optional[Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__lowerCAmelCase : Any = "wiki40b"
__lowerCAmelCase : Optional[int] = "dense"
__lowerCAmelCase : Tuple = "beam"
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : int = 64
__lowerCAmelCase : Optional[Any] = 2_56
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : int = None
__lowerCAmelCase : str = st.sidebar.checkbox("Generation options")
if generate_options:
__lowerCAmelCase : List[Any] = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__lowerCAmelCase : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
if sampled == "beam":
__lowerCAmelCase : List[Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__lowerCAmelCase : int = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__lowerCAmelCase : Dict = res[1].strip()
if sec_titles == "":
__lowerCAmelCase : Dict = "[{}]({})".format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__lowerCAmelCase : Optional[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 674 | """simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base,exponent pair with the greatest value."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
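# Why the logarithm comparison works (illustrative note, not in the original file):
# the base,exponent pairs in the data file are far too large to evaluate directly, but
# log10(a**x) == x * log10(a) preserves the ordering. Small worked case:
#   2**11 = 2048 vs 3**7 = 2187
#   11 * log10(2) ~= 3.311  <  7 * log10(3) ~= 3.340  -> 3**7 is larger, no powers computed.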
| 674 | 1 |
"""simple docstring"""
def solution(min_total: int = 10**12) -> int:
    """Smallest number of blue discs giving P(two blue) == 1/2 with more than min_total discs."""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | """simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean greatest common divisor."""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main():
    """Print a few example GCD computations."""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
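# Step-by-step trace (illustrative): euclidean_gcd(48, 18)
#   (a, b) = (48, 18) -> (18, 48 % 18 = 12) -> (12, 18 % 12 = 6) -> (6, 12 % 6 = 0) -> gcd = 6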
| 674 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
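# Hedged usage sketch (the backbone name and indices are illustrative, not from this file):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   config.use_timm_backbone  # -> True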
| 674 | """simple docstring"""
import os
def solution():
    """Find the maximum top-to-bottom path sum through the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
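# Worked tiny example of the same top-down DP (illustrative, not the real triangle.txt):
#   3          row 1: [3]
#   7 4        row 2 becomes [3+7, 3+4] = [10, 7]
#   2 4 6      row 3 becomes [10+2, max(10, 7)+4, 7+6] = [12, 14, 13]
# max of the last row = 14, matching the best path 3 -> 7 -> 4.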
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    """Build a fixed example tree: 1 at the root, 2 and 3 as children, 4 and 5 under 2
    (linking structure assumed; the original assignments were mangled)."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal of the tree's node values."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values at the given level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values at the given level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Zigzag (spiral) level-order traversal: alternate direction on each level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    """Run every traversal on the example tree and print the results."""
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree)}""")
    print(f"""Pre-order Traversal: {preorder(tree)}""")
    print(f"""Post-order Traversal: {postorder(tree)}""", """\n""")

    print(f"""Height of Tree: {height(tree)}""", """\n""")

    print("""Complete Level Order Traversal: """)
    print(level_order(tree), """\n""")

    print("""Level-wise order Traversal: """)
    for level in range(1, height(tree) + 1):
        print(f"""Level {level}:""", get_nodes_from_left_to_right(tree, level=level))

    print("""\nZigZag order Traversal: """)
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
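# Expected output for the example tree above (1 at the root, 2 and 3 as children,
# 4 and 5 under 2) -- worked by hand, shown for orientation:
#   preorder    -> [1, 2, 4, 5, 3]
#   inorder     -> [4, 2, 5, 1, 3]
#   postorder   -> [4, 5, 2, 3, 1]
#   level_order -> [1, 2, 3, 4, 5]
#   zigzag      -> [[1], [3, 2], [4, 5]]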
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 674 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes() -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
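# Hedged note on the response shape (recalled from the public zenquotes.io docs, not
# guaranteed by this file): both endpoints return a JSON list of quote objects roughly like
#   [{"q": "<quote text>", "a": "<author>", "h": "<pre-rendered html>"}]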
| 674 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name assumed; the original identifier was mangled
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)
    def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def test_save_load_local(self):
super().test_save_load_local(expected_max_difference=3E-3 )
    def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 674 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge (u_node, v_node, weight) to the graph."""
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        """Find the component that a given node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        """Recompute every node's component after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components of two given nodes, smaller into larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        """Perform Borůvka's algorithm and print the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def main() -> None:
    """Run Borůvka's algorithm on a small example graph."""
    # Hedged example: the original body was empty; these edge values are assumed for
    # illustration (any connected weighted graph works).
    g = Graph(4)
    for u_node, v_node, weight in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
        g.add_edge(u_node, v_node, weight)
    g.boruvka()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 674 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    """Build small MRPC train/eval dataloaders for accelerate tests (function name assumed)."""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
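# Hedged usage sketch (assumes an `accelerate` Accelerator and the MRPC CSV fixtures exist):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = mocked_dataloaders(accelerator)
#   batch = next(iter(train_dl))  # dict of input_ids / attention_mask / labels tensors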
| 674 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {  # constant name assumed; the original identifier was mangled
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 674 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Convert a TensorFlow MobileBERT checkpoint into a PyTorch checkpoint."""
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
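# Illustrative invocation (script name and paths are assumed, not from this file;
# the flags mirror the argparse setup above):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert_ckpt \
#       --mobilebert_config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin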
| 674 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a_ ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
import torch
lowerCAmelCase__ = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
lowerCAmelCase__ = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
lowerCAmelCase__ = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
import torch
lowerCAmelCase__ = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 674 | """simple docstring"""
def sylvester ( number ):
    """simple docstring"""
    assert isinstance(number , int ), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
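# Quick check (added for illustration): the recurrence s(n) = s(n-1)**2 - s(n-1) + 1
# yields the sequence 2, 3, 7, 43, 1807, ..., so for example sylvester(5) == 1807.
# assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]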
| 674 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( word ):
    """simple docstring"""
    return "".join(chr(ord(char ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 674 | """simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """</s>""" )
        self.assertEqual(vocab_keys[-1] , """v""" )
        self.assertEqual(len(vocab_keys ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
            """ </s> <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
def _SCREAMING_SNAKE_CASE ( self : int ):
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        raw_input_str = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        token_ids = self._large_tokenizer(raw_input_str ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 674 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ):
    """simple docstring"""
    def constraint_to_multiple_of( val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
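# Worked example (added for illustration, not part of the original file): resizing
# a 480x640 image toward (384, 384) with keep_aspect_ratio=True and multiple=32
# keeps the scale factor closest to 1 (here 384/480 == 0.8) for both sides, so
#   get_resize_output_image_size(img, (384, 384), True, 32) -> (384, 512)
# since round(0.8 * 640 / 32) * 32 == 512; `img` is a hypothetical input array.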
class a_ ( BaseImageProcessor ):
UpperCamelCase_ : Dict = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 384, """width""": 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : int = None , keep_aspect_ratio : bool = None , ensure_multiple_of : int = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
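# Usage sketch (added; the surrounding class is the DPT-style image processor — the
# `model` and `image` objects below are illustrative placeholders, not defined here):
#   image_processor = a_(size={"height": 384, "width": 384})
#   inputs = image_processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
# Each entry of `maps` is then a (480, 640) tensor of per-pixel class indices.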
| 674 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
    def setUp( self ):
if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=True , )
assert hasattr(self , """env""" )
    def create_estimator( self , instance_count ):
# configuration for running training on smdistributed Model Parallel
        mpi_options = {
"""enabled""": True,
"""processes_per_host""": 8,
}
        smp_options = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
    def _SCREAMING_SNAKE_CASE ( self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile )
| 674 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class a_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
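    # Illustrative example (added): for a sentence pair the mask marks segments as
    #   [CLS] A1 A2 [SEP] B1 B2 B3 [SEP]
    #    0    0  0   0    1  1  1   1
    # i.e. len(cls + token_ids_a + sep) zeros followed by len(token_ids_b + sep) ones.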
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 674 | """simple docstring"""
from math import pi, sqrt
def gamma ( num ):
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
if num > 1_71.5:
raise OverflowError("""math range error""" )
elif num - int(lowerCamelCase__ ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
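# Worked example (added): for positive integers the recursion unwinds to a
# factorial, gamma(n) == (n - 1)!, e.g. gamma(5) == 4 * 3 * 2 * 1 == 24, while
# half-integer inputs bottom out at gamma(0.5) == sqrt(pi) ~= 1.7724538509.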
def test_gamma ( ):
"""simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(F"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception ):
    pass
class Node :
    def __init__( self , data : Any ):
        self.data = data
        self.next_node = None
    def __iter__( self ):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
@property
def _SCREAMING_SNAKE_CASE ( self : str ):
try:
list(self )
return False
except ContainsLoopError:
return True
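# Added note: the visited-list check in __iter__ is O(n^2) time and O(n) space.
# Floyd's tortoise-and-hare cycle detection is a constant-space alternative —
# a minimal sketch (not part of the original file):
def has_loop_floyd(head ):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False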
if __name__ == "__main__":
__lowerCAmelCase : Tuple = Node(1)
__lowerCAmelCase : str = Node(2)
__lowerCAmelCase : int = Node(3)
__lowerCAmelCase : Tuple = Node(4)
print(root_node.has_loop) # False
__lowerCAmelCase : Optional[Any] = root_node.next_node
print(root_node.has_loop) # True
__lowerCAmelCase : Dict = Node(5)
__lowerCAmelCase : Any = Node(6)
__lowerCAmelCase : List[str] = Node(5)
__lowerCAmelCase : Dict = Node(6)
print(root_node.has_loop) # False
__lowerCAmelCase : Optional[Any] = Node(1)
print(root_node.has_loop) # False
| 674 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class a_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _SCREAMING_SNAKE_CASE ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def _SCREAMING_SNAKE_CASE ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
"""simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 674 | 1 |
"""simple docstring"""
import math
def main():
    """simple docstring"""
    message = input("""Enter message: """ )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("""Encryption/Decryption [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + '|'}""" )
def encrypt_message(key , message ):
    """simple docstring"""
    cipher_text = [""""""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key , message ):
    """simple docstring"""
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""""""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 674 | """simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step ) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
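# Worked example (added): with the defaults seed=2 and step=1 the pseudorandom
# map is f(x) = (x**2 + 1) % num, and pollard_rho(8051) finds the factor 97
# (8051 == 83 * 97) after a few tortoise/hare iterations.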
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass ( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : str ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
# Check the values changed in kwargs
__lowerCAmelCase : str = ""
__lowerCAmelCase : Optional[Any] = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 674 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_46_94
        config.cell_selection_preference = 0.20_79_51
        config.huber_loss_delta = 0.12_11_94
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_35_25_13
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.45_19
        config.cell_selection_preference = 0.90_34_21
        config.huber_loss_delta = 2_22.0_88
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_31_41
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f"""Task {task} not supported.""" )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 674 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__lowerCAmelCase : Any = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False,
                 bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,)
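

# A minimal usage sketch (the class name AlbertTokenizer above is restored from
# the upstream library; the checkpoint id comes from the vocab map above, and
# the call pattern is the standard tokenizer API -- both are assumptions here):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tokenizer("sentencepiece keeps sub-word pieces")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))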
| 674 | """simple docstring"""
def solution(length: int = 50) -> int:
    """
    Project Euler 116: count the ways a row of `length` black square tiles can
    have some tiles replaced by coloured oblong tiles of length 2 (red),
    3 (green) or 4 (blue), never mixing colours and using at least one tile.

    >>> solution(5)
    12
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
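

# Reading of the recurrence: for a fixed tile length t, let ways(n) be the
# number of arrangements of a length-n row containing at least one tile. Then
#     ways(n) = sum over offsets s of (ways(n - s - t) + 1),
# i.e. place the leftmost tile s units in and either stop (+1) or keep tiling
# the remaining n - s - t units. The three colours never mix, so the answer is
# the sum of the three independent per-length counts (7 + 3 + 2 = 12 for a row
# of length 5, matching the doctest above).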
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__lowerCAmelCase : int = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]],
                 padding: Union[int, Tuple[int, int], str] = 0, bias: bool = False,
                 dilation: Union[int, Tuple[int, int]] = 1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                              padding=padding, bias=bias, dilation=dilation)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1
        )
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = R"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
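

# A minimal inference sketch (assuming the upstream public names restored above
# and the checkpoint listed in the archive map; the AutoImageProcessor flow is
# the usual transformers pattern and an assumption here):
#
#   from PIL import Image
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height, width)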
| 674 | """simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
    bort_4_8_768_1024_hparams = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"],
        units=predefined_args["units"], hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"], dropout=predefined_args["dropout"],
        output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
lowerCAmelCase__ = nlp.model.BERTModel(
lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , )
original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
lowerCAmelCase__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )
# self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCamelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 674 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8,
                 min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True,
                 finegrained_output=True, hidden_act="relu6", tf_padding=True,
                 classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001,
                 semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
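

# A minimal usage sketch (the class names above are restored from the upstream
# library; the constructor keywords mirror the parameters parsed in __init__):
#
#   config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
#   onnx_config = MobileNetV2OnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])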
| 674 | """simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = last % 1  # fractional part of `last`, drives the round-half-up below
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
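    # For a quick cross-check, OpenCV ships a built-in histogram equalization
    # (a sketch; the input path mirrors the one used above):
    #
    #   equalized = cv2.equalizeHist(cv2.imread(file_path, 0))
    #   cv2.imwrite("output_data/output_reference.jpg", equalized)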
| 674 | 1 |
"""simple docstring"""
import os
def solution():
    """
    Find the maximum total from top to bottom of the triangle stored in
    triangle.txt (Project Euler 67) by accumulating, row by row, the best
    total that can reach each position.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
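

# Worked example of the same accumulation on Project Euler 18's small triangle
# [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]: the rows become
# [3], [10, 7], [12, 14, 13], [20, 19, 23, 16], so the maximum total is 23.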
if __name__ == "__main__":
print(solution())
| 674 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
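

# A minimal usage sketch (assuming the upstream tool API restored above; the
# image path and label string are placeholders):
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("photo.jpg"), label="a cat")
#   mask.save("mask.png")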
| 674 | 1 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16,
                 num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
UpperCamelCase_ : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Any = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Optional[Any] = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="""MRA does not output attentions""" )
    def test_attention_outputs(self):
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 674 | """simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674 | 1 |
"""simple docstring"""
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent, returned as an
    integer whose decimal digits are the binary digits.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-e")
    -1110
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | """simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
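# In finite-field Diffie-Hellman over one of these groups, each party draws a
# private exponent and publishes g**a mod p (g = "generator", p = "prime");
# combining the peer's public value with one's own exponent yields the shared
# secret g**(a*b) mod p, usually hashed (e.g. with the sha256 imported above)
# before use as a key. A minimal sketch with plain Python integers, where B
# stands for the peer's public value:
#
#   p, g = primes[14]["prime"], primes[14]["generator"]
#   a = int.from_bytes(urandom(32), "big")          # private exponent
#   A = pow(g, a, p)                                # public value to send
#   shared_key = sha256(str(pow(B, a, p)).encode()).hexdigest()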
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
def __init__( self : List[str] , snake_case__ : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
lowerCAmelCase__ = primes[group]["""prime"""]
lowerCAmelCase__ = primes[group]["""generator"""]
lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
return hex(self.__private_key )[2:]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime )
return hex(snake_case__ )[2:]
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
if not self.is_valid_public_key(snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1
)
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
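# A hedged usage sketch (assumes the methods above keep their conventional,
# distinct names -- get_private_key / generate_public_key / generate_shared_key --
# rather than the collapsed placeholder names shown here, and that the class is
# exposed as DiffieHellman, as the static helper above already references):
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b  # both sides derive the same hashed secret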
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def _UpperCAmelCase ( lowerCamelCase__="ro" , lowerCamelCase__="en" , lowerCamelCase__="wmt16" , lowerCamelCase__=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
lowerCAmelCase__ = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
lowerCAmelCase__ = datasets.load_dataset(lowerCamelCase__ , lowerCamelCase__ )
if save_dir is None:
lowerCAmelCase__ = f"""{dataset}-{pair}"""
lowerCAmelCase__ = Path(lowerCamelCase__ )
save_dir.mkdir(exist_ok=lowerCamelCase__ )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
lowerCAmelCase__ = """val""" if split == """validation""" else split
lowerCAmelCase__ = save_dir.joinpath(f"""{fn}.source""" )
lowerCAmelCase__ = save_dir.joinpath(f"""{fn}.target""" )
lowerCAmelCase__ = src_path.open("""w+""" )
lowerCAmelCase__ = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowerCAmelCase__ = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
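# Example invocation (hypothetical script name; needs `pip install datasets fire tqdm`):
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# which writes train/val/test `.source` and `.target` files under wmt16-ro-en/.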
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = weights[0][0][0]
lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
lowerCAmelCase__ = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
# intermediate weighs
lowerCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
lowerCAmelCase__ = intermediate_weights[2]
# layernorm 2
lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = torch_model.reformer
# word embeds
lowerCAmelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
if isinstance(weights[3] , lowerCamelCase__ ):
lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
lowerCAmelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# output layer norm
lowerCAmelCase__ = np.asarray(weights[7][0] )
lowerCAmelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# output embeddings
lowerCAmelCase__ = np.asarray(weights[9][0] )
lowerCAmelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as f:
lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=__UpperCamelCase ):
UpperCamelCase_ : Optional[Any] = ["torch", "scipy"]
def __init__( self : Optional[int] , *snake_case__ : str , **snake_case__ : Tuple ):
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , *snake_case__ : int , **snake_case__ : Optional[int] ):
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] , *snake_case__ : Any , **snake_case__ : Tuple ):
requires_backends(cls , ["""torch""", """scipy"""] )
| 674 | """simple docstring"""
import os
from math import logaa
def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) ):
lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) )
if x * logaa(lowerCamelCase__ ) > largest:
lowerCAmelCase__ = x * logaa(lowerCamelCase__ )
lowerCAmelCase__ = i + 1
return result
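# Why logarithms suffice here: log10(x**y) == y * log10(x) preserves ordering,
# so the huge powers never have to be evaluated. For example, 2**11 < 3**7
# because 11 * log10(2) ~= 3.31 < 7 * log10(3) ~= 3.34 (i.e. 2048 < 2187).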
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ = 50 ):
"""simple docstring"""
lowerCAmelCase__ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
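# Hand-checkable sanity case for the recurrence above: with tiles of length
# 2, 3 and 4 plus unit gaps, the table grows as 1, 1, 2, 4, 8, 15, so
# solution(5) == 15 (using the name the __main__ guard below prints).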
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | """simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
while b:
lowerCAmelCase__ , lowerCAmelCase__ = b, a % b
return a
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b )
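# Trace of the iterative helper (called euclidean_gcd in main() below):
# euclidean_gcd(48, 18) steps through (48, 18) -> (18, 12) -> (12, 6) -> (6, 0)
# and returns 6.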
def _UpperCAmelCase ( ):
"""simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 674 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : List[str] = XGLMTokenizer
UpperCamelCase_ : Union[str, Any] = XGLMTokenizerFast
UpperCamelCase_ : List[str] = True
UpperCamelCase_ : List[str] = True
def _SCREAMING_SNAKE_CASE ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XGLMTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = """<pad>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(snake_case__ ) , 1008 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = XGLMTokenizer(snake_case__ , keep_accents=snake_case__ )
lowerCAmelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(snake_case__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def _SCREAMING_SNAKE_CASE ( self : str ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(snake_case__ , f.name )
lowerCAmelCase__ = XGLMTokenizer(f.name , keep_accents=snake_case__ )
lowerCAmelCase__ = pickle.dumps(snake_case__ )
pickle.loads(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = """I was born in 92000, and this is falsé."""
lowerCAmelCase__ = tokenizer.tokenize(snake_case__ )
lowerCAmelCase__ = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase__ = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
lowerCAmelCase__ = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(snake_case__ )
lowerCAmelCase__ = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = """Hello World!"""
lowerCAmelCase__ = [2, 31227, 4447, 35]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowerCAmelCase__ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# fmt: off
lowerCAmelCase__ = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""facebook/xglm-564M""" , padding=snake_case__ , )
| 674 | """simple docstring"""
import os
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCamelCase__ ) )
lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" )
with open(lowerCamelCase__ ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = []
for line in triangle:
lowerCAmelCase__ = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(lowerCamelCase__ ) )
a.append(lowerCamelCase__ )
for i in range(1 , len(lowerCamelCase__ ) ):
for j in range(len(a[i] ) ):
lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ )
return max(a[-1] )
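# Worked micro-example of the row-by-row accumulation above: for the triangle
# [3] / [7, 4] / [2, 4, 6] the rows become [3], [10, 7], [12, 14, 13], so the
# best top-to-bottom path sum is max([12, 14, 13]) == 14 (path 3 -> 7 -> 4).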
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCamelCase__ ):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""" )
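# Shape contract of the helper above: the output is always a list of videos,
# each a list of frames -- a single image becomes [[image]], a flat list of
# frames becomes [frames], and an already-batched list of videos passes through.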
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : int = ["pixel_values"]
def __init__( self : Optional[Any] , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 255 , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , **snake_case__ : Optional[Any] , ):
super().__init__(**snake_case__ )
lowerCAmelCase__ = size if size is not None else {"""shortest_edge""": 224}
lowerCAmelCase__ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
lowerCAmelCase__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowerCAmelCase__ = get_size_dict(snake_case__ , param_name="""crop_size""" )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Tuple , ):
lowerCAmelCase__ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" in size:
lowerCAmelCase__ = get_resize_output_image_size(snake_case__ , size["""shortest_edge"""] , default_to_square=snake_case__ )
elif "height" in size and "width" in size:
lowerCAmelCase__ = (size["""height"""], size["""width"""])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[Any] , ):
lowerCAmelCase__ = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(snake_case__ , size=(size["""height"""], size["""width"""]) , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : np.ndarray , snake_case__ : Union[int, float] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Tuple , ):
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : np.ndarray , snake_case__ : Union[float, List[float]] , snake_case__ : Union[float, List[float]] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[Any] , ):
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : bool = None , snake_case__ : float = None , snake_case__ : bool = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase__ = to_numpy_array(snake_case__ )
if do_resize:
lowerCAmelCase__ = self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ )
if do_center_crop:
lowerCAmelCase__ = self.center_crop(snake_case__ , size=snake_case__ )
if do_rescale:
lowerCAmelCase__ = self.rescale(image=snake_case__ , scale=snake_case__ )
if do_normalize:
lowerCAmelCase__ = self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ )
lowerCAmelCase__ = to_channel_dimension_format(snake_case__ , snake_case__ )
return image
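    # Order of operations implemented above: to_numpy_array -> resize ->
    # center_crop -> rescale (scale, e.g. 1/255) -> normalize ((x - mean) / std)
    # -> to_channel_dimension_format.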
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : bool = None , snake_case__ : float = None , snake_case__ : bool = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : Any , ):
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(snake_case__ , param_name="""crop_size""" )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowerCAmelCase__ = make_batched(snake_case__ )
lowerCAmelCase__ = [
[
self._preprocess_image(
image=snake_case__ , do_resize=snake_case__ , size=snake_case__ , resample=snake_case__ , do_center_crop=snake_case__ , crop_size=snake_case__ , do_rescale=snake_case__ , rescale_factor=snake_case__ , do_normalize=snake_case__ , image_mean=snake_case__ , image_std=snake_case__ , data_format=snake_case__ , )
for img in video
]
for video in videos
]
lowerCAmelCase__ = {"""pixel_values""": videos}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__lowerCAmelCase : Optional[int] = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ):
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ):
lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowerCAmelCase__ = F"""facebook/wmt19-{pair}"""
lowerCAmelCase__ = self.get_tokenizer(snake_case__ )
lowerCAmelCase__ = self.get_model(snake_case__ )
lowerCAmelCase__ = bleu_data[pair]["""src"""]
lowerCAmelCase__ = bleu_data[pair]["""tgt"""]
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ )
lowerCAmelCase__ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCAmelCase__ = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : Dict = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__lowerCAmelCase : Dict = spec.loader.load_module()
__lowerCAmelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCAmelCase : Any = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
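# e.g. _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# yields [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")].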
__lowerCAmelCase : int = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCAmelCase__ = False
# source code of `config_class`
lowerCAmelCase__ = inspect.getsource(lowerCamelCase__ )
lowerCAmelCase__ = _re_checkpoint.findall(lowerCamelCase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCAmelCase__ , lowerCAmelCase__ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase__ = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase__ = True
break
lowerCAmelCase__ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
lowerCAmelCase__ = """\n""".join(sorted(lowerCamelCase__ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = random_quotes()
pprint.pprint(response)
| 674 | 1 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = OmegaConf.load(lowerCamelCase__ )
lowerCAmelCase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""]
lowerCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase__ = {}
lowerCAmelCase__ = """first_stage_model."""
for key in keys:
if key.startswith(lowerCamelCase__ ):
lowerCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase__ = {}
lowerCAmelCase__ = """model.diffusion_model."""
for key in keys:
if key.startswith(lowerCamelCase__ ):
lowerCAmelCase__ = state_dict[key]
lowerCAmelCase__ = config.model.params.first_stage_config.params
lowerCAmelCase__ = config.model.params.unet_config.params
lowerCAmelCase__ = VQModel(**lowerCamelCase__ ).eval()
vqvae.load_state_dict(lowerCamelCase__ )
lowerCAmelCase__ = UNetLDMModel(**lowerCamelCase__ ).eval()
unet.load_state_dict(lowerCamelCase__ )
lowerCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=lowerCamelCase__ , )
lowerCAmelCase__ = LDMPipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
pipeline.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
__lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 674 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 674 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = """"""
for i in table:
res += inp[i - 1]
return res
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return data[1:] + data[0]
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = """"""
for i in range(len(lowerCamelCase__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = int("""0b""" + data[0] + data[-1] , 2 )
lowerCAmelCase__ = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
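# Worked micro-examples for the bit-string helpers above (using the names the
# __main__ block below calls them by; s stands for a 4x4 S-box table):
#   apply_table("1010", [2, 4, 3, 1])  -> "0011"   (picks bits 2, 4, 3, 1)
#   xor("0110", "1010")                -> "1100"
#   apply_sbox(s, "0110")              -> bin(s[0][3])[2:]   (row "00" -> 0, col "11" -> 3)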
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = message[:4]
lowerCAmelCase__ = message[4:]
lowerCAmelCase__ = apply_table(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = xor(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = apply_sbox(lowerCamelCase__ , temp[:4] ) # noqa: E741
lowerCAmelCase__ = apply_sbox(lowerCamelCase__ , temp[4:] )
lowerCAmelCase__ = """0""" * (2 - len(lowerCamelCase__ )) + l # noqa: E741
lowerCAmelCase__ = """0""" * (2 - len(lowerCamelCase__ )) + r
lowerCAmelCase__ = apply_table(l + r , lowerCamelCase__ )
lowerCAmelCase__ = xor(lowerCamelCase__ , lowerCamelCase__ )
return temp + right
if __name__ == "__main__":
__lowerCAmelCase : List[str] = input("Enter 10 bit key: ")
__lowerCAmelCase : Optional[int] = input("Enter 8 bit message: ")
__lowerCAmelCase : str = [6, 3, 7, 4, 8, 5, 10, 9]
__lowerCAmelCase : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__lowerCAmelCase : Tuple = [2, 4, 3, 1]
__lowerCAmelCase : Dict = [2, 6, 3, 1, 4, 8, 5, 7]
__lowerCAmelCase : str = [4, 1, 3, 5, 7, 2, 8, 6]
__lowerCAmelCase : List[str] = [4, 1, 2, 3, 2, 3, 4, 1]
__lowerCAmelCase : Union[str, Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__lowerCAmelCase : str = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__lowerCAmelCase : List[Any] = apply_table(key, paa_table)
__lowerCAmelCase : Dict = temp[:5]
__lowerCAmelCase : List[Any] = temp[5:]
__lowerCAmelCase : int = left_shift(left)
__lowerCAmelCase : Optional[Any] = left_shift(right)
__lowerCAmelCase : List[Any] = apply_table(left + right, pa_table)
__lowerCAmelCase : Tuple = left_shift(left)
__lowerCAmelCase : Dict = left_shift(right)
__lowerCAmelCase : Any = left_shift(left)
__lowerCAmelCase : Optional[Any] = left_shift(right)
__lowerCAmelCase : int = apply_table(left + right, pa_table)
# encryption
__lowerCAmelCase : Union[str, Any] = apply_table(message, IP)
__lowerCAmelCase : Dict = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase : List[Any] = temp[4:] + temp[:4]
__lowerCAmelCase : List[str] = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase : Any = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__lowerCAmelCase : Union[str, Any] = apply_table(CT, IP)
__lowerCAmelCase : int = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase : List[str] = temp[4:] + temp[:4]
__lowerCAmelCase : List[str] = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase : List[str] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 674 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ):
lowerCAmelCase__ = np.random.default_rng(snake_case__ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[Any] ):
return self.length
def __getitem__( self : List[str] , snake_case__ : Optional[int] ):
return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ):
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ):
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
lowerCAmelCase__ = True
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ )
lowerCAmelCase__ = datasets["""train"""].unique("""label""" )
lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )}
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" )
if "label" in examples:
lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 )
lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
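# A hedged usage sketch (assumes the loader factory above is exposed as
# get_dataloaders and receives an accelerate.Accelerator instance):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator)
#   train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)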
| 674 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.