import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
    # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
    size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_pad = do_pad
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def get_expected_values(self, image_inputs, batched=False):
    if not batched:
        image = image_inputs[0]
        if isinstance(image, Image.Image):
            w, h = image.size
        else:
            h, w = image.shape[1], image.shape[2]
        if w < h:
            expected_height = int(self.size['shortest_edge'] * h / w)
            expected_width = self.size['shortest_edge']
        elif w > h:
            expected_height = self.size['shortest_edge']
            expected_width = int(self.size['shortest_edge'] * w / h)
        else:
            expected_height = self.size['shortest_edge']
            expected_width = self.size['shortest_edge']
    else:
        expected_values = []
        for image in image_inputs:
            expected_height, expected_width = self.get_expected_values([image])
            expected_values.append((expected_height, expected_width))
        expected_height = max(expected_values, key=lambda item: item[0])[0]
        expected_width = max(expected_values, key=lambda item: item[1])[1]
    return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
a__ =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase_ , 'image_mean'))
self.assertTrue(hasattr(lowercase_ , 'image_std'))
self.assertTrue(hasattr(lowercase_ , 'do_normalize'))
self.assertTrue(hasattr(lowercase_ , 'do_resize'))
self.assertTrue(hasattr(lowercase_ , 'size'))
def test_image_processor_from_dict_with_kwargs(self):
a__ =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333})
self.assertEqual(image_processor.do_pad , lowercase_)
a__ =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_)
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad , lowercase_)
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize image_processing
a__ =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a__ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_)
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image)
# Test not batched input
a__ =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__ =self.image_processor_tester.get_expected_values(lowercase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__ =self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_)
a__ =image_processing(lowercase_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def test_call_numpy(self):
# Initialize image_processing
a__ =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a__ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_)
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray)
# Test not batched input
a__ =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__ =self.image_processor_tester.get_expected_values(lowercase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ =image_processing(lowercase_ , return_tensors='pt').pixel_values
a__ , a__ =self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def test_call_pytorch(self):
# Initialize image_processing
a__ =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a__ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_)
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor)
# Test not batched input
a__ =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__ =self.image_processor_tester.get_expected_values(lowercase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ =image_processing(lowercase_ , return_tensors='pt').pixel_values
a__ , a__ =self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
a__ =json.loads(f.read())
a__ ={'image_id': 39769, 'annotations': target}
# encode them
a__ =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
a__ =image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors='pt')
# verify pixel values
a__ =torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , lowercase_)
a__ =torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4))
# verify area
a__ =torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_))
# verify boxes
a__ =torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_)
a__ =torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3))
# verify image_id
a__ =torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_))
# verify is_crowd
a__ =torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_))
# verify class_labels
a__ =torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_))
# verify orig_size
a__ =torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_))
# verify size
a__ =torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_))
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
a__ =json.loads(f.read())
a__ ={'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
a__ =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
a__ =ConditionalDetrImageProcessor(format='coco_panoptic')
a__ =image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors='pt')
# verify pixel values
a__ =torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , lowercase_)
a__ =torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4))
# verify area
a__ =torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_))
# verify boxes
a__ =torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_)
a__ =torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3))
# verify image_id
a__ =torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_))
# verify is_crowd
a__ =torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_))
# verify class_labels
a__ =torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_))
# verify masks
a__ =822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase_)
# verify orig_size
a__ =torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_))
# verify size
a__ =torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_))
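# --- Added illustration (not part of the original test file) ---
# A standalone sketch of the aspect-ratio-preserving resize rule that
# get_expected_values() above encodes: the shorter side is scaled to
# size['shortest_edge'] and the longer side is scaled by the same factor.
def expected_resized_size(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

# e.g. expected_resized_size(400, 300) == (24, 18)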
# ----------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
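# --- Added illustration (not part of the original module) ---
# A minimal sketch of the same lazy-import idea, written with PEP 562
# module-level __getattr__ instead of transformers' _LazyModule helper:
#
# import importlib
#
# def __getattr__(name):
#     for module, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module('.' + module, __name__), name)
#     raise AttributeError(name)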
# ----------------------------------------------------------------------------
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = 'autoformer'
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
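# --- Added illustration (not part of the original config) ---
# Worked example (toy numbers, assumed) of the d_model input dimension
# computed in __init__ above: a univariate series (input_size=1) with the
# default lags [1..7] and 9 features from _number_of_features gives
input_size, lags_sequence, number_of_features = 1, [1, 2, 3, 4, 5, 6, 7], 9
feature_size = input_size * len(lags_sequence) + number_of_features
assert feature_size == 16  # 1 * 7 + 9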
# ----------------------------------------------------------------------------
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = 'big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
# ----------------------------------------------------------------------------
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.patch_size = patch_size
    self.text_seq_length = text_seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.coordinate_size = coordinate_size
    self.shape_size = shape_size
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
    self.range_bbox = range_bbox
    # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
    self.text_seq_length = text_seq_length
    self.image_seq_length = (image_size // patch_size) ** 2 + 1
    self.seq_length = self.text_seq_length + self.image_seq_length
def prepare_config_and_inputs(self):
    input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
    bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
    # Ensure that bbox is legal
    for i in range(bbox.shape[0]):
        for j in range(bbox.shape[1]):
            if bbox[i, j, 3] < bbox[i, j, 1]:
                t = bbox[i, j, 3]
                bbox[i, j, 3] = bbox[i, j, 1]
                bbox[i, j, 1] = t
            if bbox[i, j, 2] < bbox[i, j, 0]:
                t = bbox[i, j, 2]
                bbox[i, j, 2] = bbox[i, j, 0]
                bbox[i, j, 0] = t
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
    sequence_labels = None
    token_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
    config = LayoutLMvaConfig(
        vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)
    return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
a__ =LayoutLMvaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
# text + image
a__ =model(lowercase_ , pixel_values=lowercase_)
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
a__ =model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_)
a__ =model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
a__ =model(lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
a__ =model(pixel_values=lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
a__ =self.num_labels
a__ =LayoutLMvaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
a__ =self.num_labels
a__ =LayoutLMvaForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
a__ =LayoutLMvaForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    config_and_inputs = self.prepare_config_and_inputs()
    (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
    inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case =False
snake_case =False
snake_case =False
all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def setUp(self):
    self.model_tester = LayoutLMvaModelTester(self)
    self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
a__ =copy.deepcopy(lowercase_)
if model_class in get_values(lowercase_):
a__ ={
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(lowercase_ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase_):
a__ =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in get_values(lowercase_):
a__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
a__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
*get_values(lowercase_),
]:
a__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
*get_values(lowercase_),
]:
a__ =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , )
return inputs_dict
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def test_model_various_embeddings(self):
a__ =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ =type
self.model_tester.create_and_check_model(*lowercase_)
def test_for_sequence_classification(self):
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def test_for_token_classification(self):
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
def test_for_question_answering(self):
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
@slow
def test_model_from_pretrained(self):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ =LayoutLMvaModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class lowercase_ (unittest.TestCase ):
@cached_property
def default_image_processor(self):
return LayoutLMvaImageProcessor(apply_ocr=lowercase_) if is_vision_available() else None
@slow
def test_inference_no_head(self):
a__ =LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(lowercase_)
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(images=lowercase_ , return_tensors='pt').pixel_values.to(lowercase_)
a__ =torch.tensor([[1, 2]])
a__ =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
a__ =model(
input_ids=input_ids.to(lowercase_) , bbox=bbox.to(lowercase_) , pixel_values=pixel_values.to(lowercase_) , )
# verify the logits
a__ =torch.Size((1, 199, 768))
self.assertEqual(outputs.last_hidden_state.shape , lowercase_)
a__ =torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4))
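# --- Added illustration (not part of the original tests) ---
# A vectorized sketch of the bbox "legalization" loop in
# prepare_config_and_inputs(): boxes are (x0, y0, x1, y1), and randomly drawn
# boxes may come out inverted, so each coordinate pair is sorted.
import torch

def legalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)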
# ----------------------------------------------------------------------------
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
a__ =SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a__ =1000
a__ ='huggingface/label-files'
a__ ='imagenet-1k-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ =[3, 3, 6, 4]
a__ =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a__ =[3, 3, 9, 6]
a__ =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a__ =[4, 3, 10, 5]
a__ =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a__ =[4, 4, 12, 6]
a__ =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' , check_hash=__a )
else:
a__ =torch.load(__a , map_location='cpu' )
a__ =checkpoint
a__ =create_rename_keys(__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a , __a , __a )
# load HuggingFace model
a__ =SwiftFormerForImageClassification(__a ).eval()
hf_model.load_state_dict(__a )
# prepare test inputs
a__ =prepare_img()
a__ =ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ =processor(images=__a , return_tensors='pt' )
# compare outputs from both models
a__ =get_expected_output(__a )
a__ =hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __a , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
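# --- Added usage sketch (not part of the original script) ---
# The two helpers above implement a common checkpoint-conversion pattern:
# build (old_key, new_key) pairs first, then pop/re-insert each entry.
# Toy example (hypothetical key names):
#
#   toy = {'network.0.0.pwconv.weight': 0}
#   for src, dest in create_rename_keys(toy):
#       rename_key(toy, src, dest)
#   # toy now holds 'swiftformer.encoder.network.0.blocks.0.point_wise_conv.weight'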
# ----------------------------------------------------------------------------
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMvaConfig(PretrainedConfig):
    model_type = 'layoutlmv3'
def __init__( self , lowercase_=50265 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_=1024 , lowercase_=128 , lowercase_=128 , lowercase_=True , lowercase_=32 , lowercase_=128 , lowercase_=64 , lowercase_=256 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=224 , lowercase_=3 , lowercase_=16 , lowercase_=None , **lowercase_ , ) -> int:
super().__init__(
vocab_size=lowercase_ , hidden_size=lowercase_ , num_hidden_layers=lowercase_ , num_attention_heads=lowercase_ , intermediate_size=lowercase_ , hidden_act=lowercase_ , hidden_dropout_prob=lowercase_ , attention_probs_dropout_prob=lowercase_ , max_position_embeddings=lowercase_ , type_vocab_size=lowercase_ , initializer_range=lowercase_ , layer_norm_eps=lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , )
a__ =max_ad_position_embeddings
a__ =coordinate_size
a__ =shape_size
a__ =has_relative_attention_bias
a__ =rel_pos_bins
a__ =max_rel_pos
a__ =has_spatial_attention_bias
a__ =rel_ad_pos_bins
a__ =max_rel_ad_pos
a__ =text_embed
a__ =visual_embed
a__ =input_size
a__ =num_channels
a__ =patch_size
a__ =classifier_dropout
class LayoutLMvaOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.12')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
])
@property
def atol_for_validation(self) -> float:
return 1e-5
@property
def default_onnx_opset(self) -> int:
return 12
def generate_dummy_inputs(self, processor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , lowercase_)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a__ =compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a__ =processor.tokenizer.num_special_tokens_to_add(lowercase_)
a__ =compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_)
# Generate dummy inputs according to compute batch and sequence
a__ =[[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
a__ =[[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
a__ =self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_)
a__ =dict(
processor(
lowercase_ , text=lowercase_ , boxes=lowercase_ , return_tensors=lowercase_ , ))
return inputs
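# --- Added illustration (not part of the original config) ---
# A minimal sketch of the fallback that compute_effective_axis_dimension
# performs in generate_dummy_inputs above (an assumption inferred from how it
# is called): a dynamic axis (-1) is pinned to a small fixed size so ONNX
# export does not specialize on it.
def effective_dim(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add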
# ----------------------------------------------------------------------------
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        # follow parent pointers until a node is its own component root
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # attach the smaller component under the larger one (union by size)
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # remember the cheapest edge leaving each component
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
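# --- Added usage example (not part of the original file) ---
# Build a small weighted graph and compute its minimum spanning tree.
def demo() -> None:
    g = Graph(5)
    for u, v, w in [(0, 1, 4), (0, 2, 3), (1, 2, 1), (2, 3, 7), (3, 4, 2)]:
        g.add_edge(u, v, w)
    g.boruvka()  # prints each added edge and the total MST weight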
# ----------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
_import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# ----------------------------------------------------------------------------
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
    if rouge_types is None:
        rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
    if use_aggregator:
        aggregator = scoring.BootstrapAggregator()
    else:
        scores = []
    for ref, pred in zip(references, predictions):
        score = scorer.score(ref, pred)
        if use_aggregator:
            aggregator.add_scores(score)
        else:
            scores.append(score)
    if use_aggregator:
        result = aggregator.aggregate()
    else:
        result = {}
        for key in scores[0]:
            result[key] = [score[key] for score in scores]
    return result
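# --- Added usage sketch (not part of the original metric) ---
# Direct use of the underlying google rouge_score package:
#
#   from rouge_score import rouge_scorer
#   scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
#   scores = scorer.score('the cat sat on the mat', 'the cat was on the mat')
#   scores['rouge1'].fmeasure  # float in [0, 1]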
# ----------------------------------------------------------------------------
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        # binary search for the insertion point in collection[:i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
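# --- Added reference implementation (not part of the original file) ---
# The binary-search step above is what the standard library's bisect module
# provides; an equivalent (but not in-place) version for comparison:
from bisect import insort

def binary_insertion_sort_bisect(collection: list) -> list:
    result: list = []
    for item in collection:
        insort(result, item)
    return result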
# ----------------------------------------------------------------------------
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
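# --- Added worked example (not part of the original file) ---
# With the words inserted above, autocomplete_using_trie('de') walks d -> e
# and collects every stored suffix, yielding
# ('depart ', 'detergent ', 'deer ', 'deal ') -- each completion carries a
# trailing space because the end marker '#' maps to ' ' in _elements().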
# ----------------------------------------------------------------------------
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0]))
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')
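# --- Added numerical check (not part of the original file) ---
if __name__ == "__main__":
    import numpy as np
    m = [[2.0, 5.0], [1.0, 3.0]]  # determinant 1
    print(inverse_of_matrix(m))        # [[3.0, -5.0], [-1.0, 2.0]]
    print(np.linalg.inv(np.array(m)))  # should agree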
# ----------------------------------------------------------------------------
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')
    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            # move to the next key letter, wrapping around
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # non-letter symbols pass through unchanged
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
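# --- Added worked example (not part of the original file) ---
# encrypt_message('AZ', 'abc'): a(0)+A(0)='a', b(1)+Z(25)=26%26=0='a',
# c(2)+A(0)='c'  ->  'aac'; decrypt_message('AZ', 'aac') recovers 'abc'.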
# ----------------------------------------------------------------------------
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than the length of the pattern string
        # that means this index is a starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest
    doctest.testmod()
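# --- Added worked example (not part of the original file) ---
# z_function('abacaba') == [0, 0, 1, 0, 3, 0, 1]  (z[0] is left at 0 here)
# find_pattern('aba', 'abacaba') == 2   # occurrences at indices 0 and 4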
| 20 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
# ----------------------------------------------------------------------------
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow('Original', gray)
    imshow('Gabor filter with 20x20 mask and 6 directions', out)
    waitKey(0)
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
    snake_case =KandinskyV22PriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _lowercase( __a : Optional[int] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowercase_ (lowercase__ ):
@staticmethod
def __UpperCamelCase ( lowercase_) -> Optional[Any]:
a__ =parser.add_parser('download')
download_parser.add_argument(
'--cache-dir' , type=lowercase_ , default=lowercase_ , help='Path to location to store the models')
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir')
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=lowercase_ , help='Name of the model to download')
download_parser.set_defaults(func=lowercase_)
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
a__ =model
a__ =cache
a__ =force
a__ =trust_remote_code
def __UpperCamelCase ( self) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code)
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code)
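# Illustrative CLI invocation backed by this command (registration under
# `transformers-cli` happens elsewhere; the model name is just an example):
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased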
| 20 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: Dict = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowercase_ (lowercase__ ):
snake_case ='glpn'
def __init__( self , lowercase_=3 , lowercase_=4 , lowercase_=[2, 2, 2, 2] , lowercase_=[8, 4, 2, 1] , lowercase_=[32, 64, 160, 256] , lowercase_=[7, 3, 3, 3] , lowercase_=[4, 2, 2, 2] , lowercase_=[1, 2, 5, 8] , lowercase_=[4, 4, 4, 4] , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=0.1 , lowercase_=1e-6 , lowercase_=64 , lowercase_=10 , lowercase_=-1 , **lowercase_ , ) -> int:
super().__init__(**lowercase_)
a__ =num_channels
a__ =num_encoder_blocks
a__ =depths
a__ =sr_ratios
a__ =hidden_sizes
a__ =patch_sizes
a__ =strides
a__ =mlp_ratios
a__ =num_attention_heads
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =drop_path_rate
a__ =layer_norm_eps
a__ =decoder_hidden_size
a__ =max_depth
a__ =head_in_index
| 20 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def _lowercase( __a : int=None , __a : Any=None ):
return field(default_factory=lambda: default , metadata=__a )
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
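# Usage sketch (illustrative) of the API exercised by the tests above; the
# dataclass and flag names are placeholders, not names from this file:
#   parser = HfArgumentParser(SomeDataclass)
#   (args,) = parser.parse_args_into_dataclasses(['--foo', '12'])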
| 20 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
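# Illustrative check (not part of the original file): prime_generator yields
# primes in ascending order, e.g.
#   from itertools import islice
#   list(islice(prime_generator(), 5))  # [2, 3, 5, 7, 11]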
def solution(nth: int = 200_0000) -> int:
    return sum(takewhile(lambda x: x < nth , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
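# Worked example (illustrative): with the defaults above - input_size=1, no
# static/dynamic/time features (so cardinality=[0] and sum(embedding_dimension)=0)
# and a lags_sequence of length 7 - the property gives
# _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, so the encoder input width is
# feature_size = 1 * 7 + 2 = 9.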
| 20 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=True , lowercase_=False) -> Any:
if rouge_types is None:
a__ =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__ =rouge_scorer.RougeScorer(rouge_types=lowercase_ , use_stemmer=lowercase_)
if use_aggregator:
a__ =scoring.BootstrapAggregator()
else:
a__ =[]
for ref, pred in zip(lowercase_ , lowercase_):
a__ =scorer.score(lowercase_ , lowercase_)
if use_aggregator:
aggregator.add_scores(lowercase_)
else:
scores.append(lowercase_)
if use_aggregator:
a__ =aggregator.aggregate()
else:
a__ ={}
for key in scores[0]:
a__ =[score[key] for score in scores]
return result
| 20 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
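    # The method below post-processes semantic segmentation outputs: logits are
    # optionally resized (bilinear) to each requested target size, then argmax-ed
    # over the class dimension into one per-pixel label map per image.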
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 20 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _lowercase( __a : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
a__ =model_type_to_module_name(__a )
a__ =importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(__a , __a )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__a , '__name__' , __a ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
a__ =importlib.import_module('transformers' )
if hasattr(__a , __a ):
return getattr(__a , __a )
return None
def _lowercase( __a : Union[str, os.PathLike] , __a : Optional[Union[str, os.PathLike]] = None , __a : bool = False , __a : bool = False , __a : Optional[Dict[str, str]] = None , __a : Optional[Union[bool, str]] = None , __a : Optional[str] = None , __a : bool = False , **__a : Any , ):
a__ =get_file_from_repo(
__a , __a , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(__a , encoding='utf-8' ) as reader:
return json.load(__a )
class lowercase_ :
def __init__( self) -> str:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(lowercase_)
def __UpperCamelCase ( cls , lowercase_ , **lowercase_) -> str:
a__ =kwargs.pop('config' , lowercase_)
a__ =kwargs.pop('trust_remote_code' , lowercase_)
a__ =True
a__ , a__ =ImageProcessingMixin.get_image_processor_dict(lowercase_ , **lowercase_)
a__ =config_dict.get('image_processor_type' , lowercase_)
a__ =None
if "AutoImageProcessor" in config_dict.get('auto_map' , {}):
a__ =config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
a__ =config_dict.pop('feature_extractor_type' , lowercase_)
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.')
a__ =feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor')
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {}):
a__ =config_dict['auto_map']['AutoFeatureExtractor']
a__ =feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor')
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.')
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowercase_ , lowercase_):
a__ =AutoConfig.from_pretrained(lowercase_ , **lowercase_)
# It could be in `config.image_processor_type``
a__ =getattr(lowercase_ , 'image_processor_type' , lowercase_)
if hasattr(lowercase_ , 'auto_map') and "AutoImageProcessor" in config.auto_map:
a__ =config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
a__ =image_processor_class_from_name(lowercase_)
a__ =image_processor_auto_map is not None
a__ =image_processor_class is not None or type(lowercase_) in IMAGE_PROCESSOR_MAPPING
a__ =resolve_trust_remote_code(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
if has_remote_code and trust_remote_code:
a__ =get_class_from_dynamic_module(
lowercase_ , lowercase_ , **lowercase_)
a__ =kwargs.pop('code_revision' , lowercase_)
if os.path.isdir(lowercase_):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowercase_ , **lowercase_)
elif image_processor_class is not None:
return image_processor_class.from_dict(lowercase_ , **lowercase_)
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowercase_) in IMAGE_PROCESSOR_MAPPING:
a__ =IMAGE_PROCESSOR_MAPPING[type(lowercase_)]
return image_processor_class.from_dict(lowercase_ , **lowercase_)
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}""")
@staticmethod
def __UpperCamelCase ( lowercase_ , lowercase_) -> Union[str, Any]:
IMAGE_PROCESSOR_MAPPING.register(lowercase_ , lowercase_)
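# Usage sketch (illustrative, using the upstream AutoImageProcessor API that
# this class mirrors; the checkpoint name is just an example):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
#   # 'vit' resolves to 'ViTImageProcessor' via IMAGE_PROCESSOR_MAPPING_NAMES above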
| 20 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
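# Usage sketch (illustrative): the patcher above swaps an attribute such as
# os.path.join as seen from a target module `mod`, restoring it on exit:
#   with lowercase_(mod, 'os.path.join', new=my_join):
#       ...  # code reached through `mod` sees the patched join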
| 20 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=0.6 , lowercase_=None , ) -> Dict:
a__ =parent
a__ =batch_size
a__ =image_size
a__ =patch_size
a__ =num_channels
a__ =is_training
a__ =use_labels
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =type_sequence_label_size
a__ =initializer_range
a__ =mask_ratio
a__ =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
a__ =(image_size // patch_size) ** 2
a__ =int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self) -> List[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =TFViTMAEModel(config=lowercase_)
a__ =model(lowercase_ , training=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Optional[int]:
a__ =TFViTMAEForPreTraining(lowercase_)
a__ =model(lowercase_ , training=lowercase_)
# expected sequence length = num_patches
a__ =(self.image_size // self.patch_size) ** 2
a__ =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
a__ =1
a__ =TFViTMAEForPreTraining(lowercase_)
a__ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a__ =model(lowercase_ , training=lowercase_)
a__ =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self) -> List[str]:
a__ =self.prepare_config_and_inputs()
((a__) , (a__) , (a__)) =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
snake_case ={'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Dict:
a__ =TFViTMAEModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def __UpperCamelCase ( self) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self) -> Optional[int]:
pass
def __UpperCamelCase ( self) -> List[Any]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
a__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Layer))
def __UpperCamelCase ( self) -> Dict:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_)
def __UpperCamelCase ( self) -> Any:
# make the mask reproducible
np.random.seed(2)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =int((config.image_size // config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(lowercase_ , noise=lowercase_)
a__ =copy.deepcopy(self._prepare_for_class(lowercase_ , lowercase_))
a__ =model(**lowercase_ , noise=lowercase_)
a__ =outputs_dict[0].numpy()
a__ =outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1e-6)
def __UpperCamelCase ( self) -> Any:
# make the mask reproducible
np.random.seed(2)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =int((config.image_size // config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(lowercase_):
a__ ={}
for k, v in inputs_dict.items():
if tf.is_tensor(lowercase_):
a__ =v.numpy()
else:
a__ =np.array(lowercase_)
return inputs_np_dict
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =prepare_numpy_arrays(lowercase_)
a__ =model(lowercase_ , noise=lowercase_)
a__ =model(**lowercase_ , noise=lowercase_)
self.assert_outputs_same(lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
# make masks reproducible
np.random.seed(2)
a__ =int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
a__ =tf.constant(lowercase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
a__ =tf_noise
super().check_pt_tf_models(lowercase_ , lowercase_ , lowercase_)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith('MainLayer')
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('MainLayer')] == model_class.__name__[: -len('Model')]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, '_keras_serializable', False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({'noise': noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, 'keras_model.h5')
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class})
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
@slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_1 = outputs.last_hidden_state.numpy()
                out_1[np.isnan(out_1)] = 0
            else:
                out_1 = outputs.logits.numpy()
                out_1[np.isnan(out_1)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(inputs, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_2 = after_outputs['last_hidden_state'].numpy()
                    out_2[np.isnan(out_2)] = 0
                else:
                    out_2 = after_outputs['logits'].numpy()
                    out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
def __UpperCamelCase ( self) -> Any:
# make mask reproducible
np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config)
            _ = new_model(inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(inputs, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self) -> List[str]:
pass
@slow
def __UpperCamelCase ( self) -> int:
a__ =TFViTMAEModel.from_pretrained('google/vit-base-patch16-224')
self.assertIsNotNone(lowercase_)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class lowercase_ (unittest.TestCase ):
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 20 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""")
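    # Hedged usage note: parse_bool is strict on purpose -- parse_bool('True') -> True,
    # parse_bool('False') -> False, and any other value (e.g. 'yes') raises ValueError,
    # which keeps the CLI override flags below unambiguous.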
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
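# Worked example (assuming (x1, y1, x2, y2) box rows and (scale_y, scale_x) columns in
# scale_yx): with scale_yx = [[0.5, 0.25]], the box [8, 4, 16, 12] becomes [2.0, 2.0, 4.0, 6.0]
# -- x coordinates (columns 0 and 2) use scale_x, y coordinates (columns 1 and 3) use scale_y.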
def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 20 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
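# Illustrative output (hypothetical version numbers): http_user_agent({'pipeline': 'text-to-image'})
# might produce 'diffusers/0.15.0; python/3.10.6; session_id/<hex>; torch/2.0.0; pipeline/text-to-image',
# or the same prefix plus '; telemetry/off' when telemetry is disabled.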
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
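# e.g. get_full_repo_name('unet-demo', organization='my-org') -> 'my-org/unet-demo';
# without an organization, the authenticated username is prepended instead.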
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        # Card metadata object that will be converted to a YAML block
        card_data=ModelCardData(
            language='en', license='apache-2.0', library_name='diffusers', tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
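# Illustrative call (hypothetical cache path):
#     extract_commit_hash('~/.cache/huggingface/diffusers/models--org--repo/snapshots/0123abcd/unet/model.bin')
# returns '0123abcd' provided the captured group matches REGEX_COMMIT_HASH, otherwise None.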
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
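# e.g. _add_variant('diffusion_pytorch_model.bin', 'fp16') -> 'diffusion_pytorch_model.fp16.bin';
# with variant=None the weights name is returned unchanged.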
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download,
    local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir,
                    force_download=force_download, proxies=proxies, resume_download=resume_download,
                    local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent,
                    subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.""", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir,
                force_download=force_download, proxies=proxies, resume_download=resume_download,
                local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent,
                subfolder=subfolder, revision=revision or commit_hash, )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowercase_ (lowercase__ ):
snake_case ='segformer'
def __init__( self , lowercase_=3 , lowercase_=4 , lowercase_=[2, 2, 2, 2] , lowercase_=[8, 4, 2, 1] , lowercase_=[32, 64, 160, 256] , lowercase_=[7, 3, 3, 3] , lowercase_=[4, 2, 2, 2] , lowercase_=[1, 2, 5, 8] , lowercase_=[4, 4, 4, 4] , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=0.02 , lowercase_=0.1 , lowercase_=1e-6 , lowercase_=256 , lowercase_=255 , **lowercase_ , ) -> str:
super().__init__(**lowercase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , lowercase_ , )
a__ =num_channels
a__ =num_encoder_blocks
a__ =depths
a__ =sr_ratios
a__ =hidden_sizes
a__ =patch_sizes
a__ =strides
a__ =mlp_ratios
a__ =num_attention_heads
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =classifier_dropout_prob
a__ =initializer_range
a__ =drop_path_rate
a__ =layer_norm_eps
a__ =decoder_hidden_size
a__ =kwargs.get('reshape_last_stage' , lowercase_)
a__ =semantic_loss_ignore_index
class lowercase_ (lowercase__ ):
snake_case =version.parse('1.11' )
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self) -> float:
return 1e-4
@property
def __UpperCamelCase ( self) -> int:
return 12
| 20 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out_1 = from_model(x)
        out_2 = our_model(x).logits
    assert torch.allclose(out_1, out_2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 20 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = F"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 20 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
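# Illustrative input (hypothetical label file): a file containing the lines
# 'neutral', 'happy', 'sad' yields {0: 'neutral', 1: 'happy', 2: 'sad'}, i.e. line
# numbers become class ids and the first token on each line becomes the label.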
def set_recursively(key, value, full_name, weight_type, hf_model):
    hf_pointer = hf_model
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = WavaVecaForCTC(config)
    else:
        hf_wav2vec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 20 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase_ (unittest.TestCase ):
    def setUp(self):
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.get_tokenizer()
a__ =BarkProcessor(tokenizer=lowercase_)
processor.save_pretrained(self.tmpdirname)
a__ =BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def __UpperCamelCase ( self) -> List[str]:
a__ =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def __UpperCamelCase ( self) -> List[Any]:
a__ =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
a__ =35
a__ =2
a__ =8
a__ ={
'semantic_prompt': np.ones(lowercase_),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
a__ =processor(text=self.input_string , voice_preset=lowercase_)
a__ =inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([])).tolist())
# test loading voice preset from npz file
a__ =os.path.join(self.tmpdirname , 'file.npz')
np.savez(lowercase_ , **lowercase_)
a__ =processor(text=self.input_string , voice_preset=lowercase_)
a__ =inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([])).tolist())
# test loading voice preset from the hub
a__ =processor(text=self.input_string , voice_preset=self.voice_preset)
def __UpperCamelCase ( self) -> Dict:
a__ =self.get_tokenizer()
a__ =BarkProcessor(tokenizer=lowercase_)
a__ =processor(text=self.input_string)
a__ =tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 20 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 1 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        # nudge equal weights apart so every edge weight is unique
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n')

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
def boruvka_mst(graph):
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
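# Minimal usage sketch (hypothetical edge weights): build a small weighted graph and
# extract its minimum spanning tree with Boruvka's algorithm.
#
#     g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (1, 4, 3)])
#     print(boruvka_mst(g))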
| 20 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str):
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"""Please use 3 unique rotors (not {unique_rotsel})"""
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"""First rotor position is not within range of 1..26 ({rotorpos1})"""
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"""Second rotor position is not within range of 1..26 ({rotorpos2})"""
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"""Third rotor position is not within range of 1..26 ({rotorpos3})"""
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str):
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"""Plugboard setting isn't type string ({type(pbstring)})"""
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"""Odd number of symbols ({len(pbstring)})"""
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring.replace(' ', '')
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"""'{i}' not in list of symbols"""
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"""Duplicate symbol ({i})"""
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(
    text: str, rotor_position: RotorPositionT, rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3), plugb: str = "", ) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 20 |
def _lowercase( __a : list[int] ):
a__ =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
a__ , a__ =numbers[j], numbers[i]
return numbers
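# A worked example (a sketch; the function above is invoked as `exchange_sort`
# in the __main__ block below, so that name is assumed here):
# exchange_sort([5, 2, 9, 1]) -> [1, 2, 5, 9]
# Each pass fixes position i by swapping in any later element that is smaller,
# giving O(n^2) comparisons regardless of input order.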
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase: List[str] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =BartphoTokenizer
snake_case =False
snake_case =True
def __UpperCamelCase ( self) -> List[str]:
super().setUp()
a__ =['▁This', '▁is', '▁a', '▁t', 'est']
a__ =dict(zip(lowercase_ , range(len(lowercase_))))
a__ ={'unk_token': '<unk>'}
a__ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'])
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8') as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
a__ =BartphoTokenizer(lowercase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> Optional[Any]:
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def __UpperCamelCase ( self , lowercase_) -> Dict:
a__ ='This is a là test'
a__ ='This is a<unk><unk> test'
return input_text, output_text
def __UpperCamelCase ( self) -> Any:
a__ =BartphoTokenizer(lowercase_ , self.monolingual_vocab_file , **self.special_tokens_map)
a__ ='This is a là test'
a__ ='▁This ▁is ▁a ▁l à ▁t est'.split()
a__ =tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
a__ =tokens + [tokenizer.unk_token]
a__ =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , lowercase_)
| 20 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 1 |
from __future__ import annotations
def _lowercase( __a : str , __a : str ):
a__ =get_failure_array(__a )
# 2) Step through text searching for pattern
a__ , a__ =0, 0 # index into text, pattern
while i < len(__a ):
if pattern[j] == text[i]:
if j == (len(__a ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
a__ =failure[j - 1]
continue
i += 1
return False
def _lowercase( __a : str ):
a__ =[0]
a__ =0
a__ =1
while j < len(__a ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
a__ =failure[i - 1]
continue
j += 1
failure.append(__a )
return failure
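# A worked example of the failure array (a sketch; this helper is invoked as
# `get_failure_array` in the tests below, so that name is assumed here):
# get_failure_array('ABABX') -> [0, 0, 1, 2, 0]
# failure[k] is the length of the longest proper prefix of pattern[:k + 1]
# that is also a suffix of it, which is what lets kmp() skip re-comparisons.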
if __name__ == "__main__":
# Test 1)
_lowerCAmelCase: List[Any] = 'abc1abc12'
_lowerCAmelCase: List[Any] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
_lowerCAmelCase: Optional[Any] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
_lowerCAmelCase: Any = 'ABABX'
_lowerCAmelCase: Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
_lowerCAmelCase: Dict = 'AAAB'
_lowerCAmelCase: Optional[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
_lowerCAmelCase: Union[str, Any] = 'abcdabcy'
_lowerCAmelCase: Optional[Any] = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
_lowerCAmelCase: Optional[Any] = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 20 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
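# Sketch of the intent (names as above): importing this package is cheap, since
# at runtime the module is swapped for a _LazyModule and submodules (and their
# torch dependency) are only imported when an attribute such as
# SwiftFormerModel is first accessed; the TYPE_CHECKING branch exists so static
# type checkers still see the real imports.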
| 20 | 1 |
from __future__ import annotations
_lowerCAmelCase: str = '#'
class lowercase_ :
def __init__( self) -> None:
a__ ={}
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =self._trie
for char in text:
if char not in trie:
a__ ={}
a__ =trie[char]
a__ =True
def __UpperCamelCase ( self , lowercase_) -> tuple | list:
a__ =self._trie
for char in prefix:
if char in trie:
a__ =trie[char]
else:
return []
return self._elements(lowercase_)
def __UpperCamelCase ( self , lowercase_) -> tuple:
a__ =[]
for c, v in d.items():
a__ =[' '] if c == END else [(c + s) for s in self._elements(lowercase_)]
result.extend(lowercase_)
return tuple(lowercase_)
_lowerCAmelCase: Optional[int] = Trie()
_lowerCAmelCase: List[str] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def _lowercase( __a : str ):
a__ =trie.find_word(__a )
return tuple(string + word for word in suffixes )
def _lowercase( ):
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
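# A worked example (a sketch; names as used in the __main__ block above, and
# assuming CPython 3.7+ where dicts preserve insertion order):
# autocomplete_using_trie('de') prints
# ('depart ', 'detergent ', 'deer ', 'deal ')
# Each stored word is terminated with END ('#'), which _elements renders as a
# trailing space, hence the space after every completion.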
| 20 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
_lowerCAmelCase: Dict = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_lowerCAmelCase: List[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _lowercase( __a : list[list[int]] ):
a__ =[]
for i in range(len(__a ) ):
a__ =[]
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
a__ =0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__a ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__a ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__a ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
a__ =cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__a )
return next_generation
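# A worked example (a sketch; BLINKER is the 3x3 grid defined above, and the
# function is invoked as `new_generation` below):
# new_generation(BLINKER) turns the vertical bar into a horizontal one:
# [[0, 0, 0],
#  [1, 1, 1],
#  [0, 0, 0]]
# and applying it again restores the original grid (a period-2 oscillator).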
def _lowercase( __a : list[list[int]] , __a : int ):
a__ =[]
for _ in range(__a ):
# Create output image
a__ =Image.new('RGB' , (len(cells[0] ), len(__a )) )
a__ =img.load()
# Save cells to image
for x in range(len(__a ) ):
for y in range(len(cells[0] ) ):
a__ =255 - cells[y][x] * 255
a__ =(colour, colour, colour)
# Save image
images.append(__a )
a__ =new_generation(__a )
return images
if __name__ == "__main__":
_lowerCAmelCase: List[str] = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 20 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def _lowercase( ):
a__ ='http://images.cocodataset.org/val2017/000000039769.jpg'
a__ =Image.open(requests.get(__a , stream=__a ).raw )
return im
def _lowercase( __a : Optional[Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _lowercase( __a : int , __a : int , __a : Optional[Any] ):
a__ =dct.pop(__a )
a__ =val
def _lowercase( __a : Optional[Any] ):
a__ =[]
for k in state_dict.keys():
a__ =k
if ".pwconv" in k:
a__ =k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a__ =k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a__ =k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a__ =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a__ =k_new.split('.' )
if ls[2].isdigit():
a__ ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a__ =k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
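# A worked example of the renaming (a sketch; the key is hypothetical):
# 'network.0.1.dwconv.weight'
#   -> '.dwconv' is rewritten first: 'network.0.1.depth_wise_conv.weight'
#   -> ls[2] == '1' is a digit, so the block index is spliced in:
#      'swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight'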
@torch.no_grad()
def _lowercase( __a : Union[str, Any] , __a : int , __a : str ):
a__ =SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a__ =1000
a__ ='huggingface/label-files'
a__ ='imagenet-1k-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ =[3, 3, 6, 4]
a__ =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a__ =[3, 3, 9, 6]
a__ =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a__ =[4, 3, 10, 5]
a__ =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a__ =[4, 4, 12, 6]
a__ =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' , check_hash=__a )
else:
a__ =torch.load(__a , map_location='cpu' )
a__ =checkpoint
a__ =create_rename_keys(__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a , __a , __a )
# load HuggingFace model
a__ =SwiftFormerForImageClassification(__a ).eval()
hf_model.load_state_dict(__a )
# prepare test inputs
a__ =prepare_img()
a__ =ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ =processor(images=__a , return_tensors='pt' )
# compare outputs from both models
a__ =get_expected_output(__a )
a__ =hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __a , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowerCAmelCase: Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
_lowerCAmelCase: Tuple = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
_lowerCAmelCase: int = '|'.join(sys.argv[1:])
_lowerCAmelCase: Optional[Any] = re.compile(RF"""^({joined_dirs}).*?\.py$""")
_lowerCAmelCase: Dict = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
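# Sketch of the matching (argv values are hypothetical): invoked as
# `python utils/get_modified_files.py utils src tests examples`,
# joined_dirs becomes 'utils|src|tests|examples', so the regex keeps
# 'src/foo/bar.py' but drops 'docs/intro.py' and any non-.py file.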
| 20 |
from __future__ import annotations
from typing import Any
class lowercase_ :
def __init__( self , lowercase_) -> None:
a__ =num_of_nodes
a__ =[]
a__ ={}
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
self.m_edges.append([u_node, v_node, weight])
def __UpperCamelCase ( self , lowercase_) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def __UpperCamelCase ( self , lowercase_) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
a__ =self.find_component(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
if component_size[u_node] <= component_size[v_node]:
a__ =v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_)
elif component_size[u_node] >= component_size[v_node]:
a__ =self.find_component(lowercase_)
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_)
def __UpperCamelCase ( self) -> None:
a__ =[]
a__ =0
a__ =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
a__ =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
a__ =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_):
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_)
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
num_of_components -= 1
a__ =[-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
import math
def _lowercase( __a : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
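# A worked example (a sketch; the predicate above is used as `is_prime` below):
# is_prime(29): 29 is odd and not a multiple of 3, so the loop checks
# i = 5 only (sqrt(29) ~ 5.39), and 29 % 5 != 0 and 29 % 7 != 0 -> True.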
def _lowercase( __a : float = 0.1 ):
a__ =3
a__ =3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__a )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=True , lowercase_=False) -> Any:
if rouge_types is None:
a__ =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__ =rouge_scorer.RougeScorer(rouge_types=lowercase_ , use_stemmer=lowercase_)
if use_aggregator:
a__ =scoring.BootstrapAggregator()
else:
a__ =[]
for ref, pred in zip(lowercase_ , lowercase_):
a__ =scorer.score(lowercase_ , lowercase_)
if use_aggregator:
aggregator.add_scores(lowercase_)
else:
scores.append(lowercase_)
if use_aggregator:
a__ =aggregator.aggregate()
else:
a__ ={}
for key in scores[0]:
a__ =[score[key] for score in scores]
return result
| 20 | 1 |
from __future__ import annotations
def _lowercase( __a : int ):
a__ =2
a__ =[]
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__a )
if n > 1:
factors.append(__a )
return factors
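# A worked example (a sketch; per the original trial-division algorithm this
# snippet was derived from, the factors appended are i and the final n):
# 360 -> divide out 2 three times (180, 90, 45), then 3 twice (15, 5);
# the remaining 5 > 1 is appended, giving [2, 2, 2, 3, 3, 5].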
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
from __future__ import annotations
_lowerCAmelCase: str = '#'
class lowercase_ :
def __init__( self) -> None:
a__ ={}
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =self._trie
for char in text:
if char not in trie:
a__ ={}
a__ =trie[char]
a__ =True
def __UpperCamelCase ( self , lowercase_) -> tuple | list:
a__ =self._trie
for char in prefix:
if char in trie:
a__ =trie[char]
else:
return []
return self._elements(lowercase_)
def __UpperCamelCase ( self , lowercase_) -> tuple:
a__ =[]
for c, v in d.items():
a__ =[' '] if c == END else [(c + s) for s in self._elements(lowercase_)]
result.extend(lowercase_)
return tuple(lowercase_)
_lowerCAmelCase: Optional[int] = Trie()
_lowerCAmelCase: List[str] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def _lowercase( __a : str ):
a__ =trie.find_word(__a )
return tuple(string + word for word in suffixes )
def _lowercase( ):
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 1 |
import requests
def _lowercase( __a : str , __a : str ):
a__ ={'Content-Type': 'application/json'}
a__ =requests.post(__a , json={'text': message_body} , headers=__a )
if response.status_code != 200:
a__ =(
'Request to slack returned an error '
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(__a )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 20 |
_lowerCAmelCase: List[str] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def _lowercase( ):
a__ =input('Enter message: ' )
a__ =input('Enter key [alphanumeric]: ' )
a__ =input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
a__ ='encrypt'
a__ =encrypt_message(__a , __a )
elif mode.lower().startswith('d' ):
a__ ='decrypt'
a__ =decrypt_message(__a , __a )
print(f"""\n{mode.title()}ed message:""" )
print(__a )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'encrypt' )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'decrypt' )
def _lowercase( __a : str , __a : str , __a : str ):
a__ =[]
a__ =0
a__ =key.upper()
for symbol in message:
a__ =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__a )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__a ):
a__ =0
else:
translated.append(__a )
return "".join(__a )
if __name__ == "__main__":
main()
| 20 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =XLMRobertaModel.from_pretrained('xlm-roberta-base')
a__ =torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
a__ =torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
a__ =torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
a__ =model(lowercase_)['last_hidden_state'].detach()
self.assertEqual(output.shape , lowercase_)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowercase_ , atol=1e-3))
@slow
def __UpperCamelCase ( self) -> Tuple:
a__ =XLMRobertaModel.from_pretrained('xlm-roberta-large')
a__ =torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
a__ =torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
a__ =torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
a__ =model(lowercase_)['last_hidden_state'].detach()
self.assertEqual(output.shape , lowercase_)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowercase_ , atol=1e-3))
| 20 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ (lowercase__ ):
snake_case =['image_processor', 'tokenizer']
snake_case ='ViltImageProcessor'
snake_case =('BertTokenizer', 'BertTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_) -> Optional[Any]:
a__ =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
a__ =kwargs.pop('feature_extractor')
a__ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowercase_ , lowercase_)
a__ =self.image_processor
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
a__ =self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel_values + pixel_mask
a__ =self.image_processor(lowercase_ , return_tensors=lowercase_)
encoding.update(lowercase_)
return encoding
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> int:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> Tuple:
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.tokenizer.model_input_names
a__ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __UpperCamelCase ( self) -> List[str]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 1 |
_lowerCAmelCase: Optional[Any] = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def _lowercase( __a : float ):
assert type(__a ) in (int, float) and decimal == int(__a )
a__ =int(__a )
a__ =''
a__ =False
if decimal < 0:
a__ =True
decimal *= -1
while decimal > 0:
a__ , a__ =divmod(__a , 16 )
a__ =values[remainder] + hexadecimal
a__ ='0x' + hexadecimal
if negative:
a__ ='-' + hexadecimal
return hexadecimal
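# Worked examples (a sketch; the conversion above is exercised by the doctest
# runner below):
# 26   -> divmod(26, 16) = (1, 10) -> 'a', then '1' -> '0x1a'
# -256 -> 256 = 16**2 -> '0x100', then the sign is prepended -> '-0x100'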
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
_lowerCAmelCase: Union[str, Any] = logging.get_logger('transformers.models.encodec')
_lowerCAmelCase: List[Any] = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
_lowerCAmelCase: str = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
_lowerCAmelCase: Optional[int] = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
_lowerCAmelCase: Dict = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
_lowerCAmelCase: Tuple = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
_lowerCAmelCase: str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_lowerCAmelCase: Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
_lowerCAmelCase: Tuple = []
_lowerCAmelCase: str = []
def _lowercase( __a : Any , __a : List[str] , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
if weight_type is not None:
a__ =getattr(__a , __a ).shape
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "running_mean":
a__ =value
elif weight_type == "running_var":
a__ =value
elif weight_type == "num_batches_tracked":
a__ =value
elif weight_type == "weight_ih_l0":
a__ =value
elif weight_type == "weight_hh_l0":
a__ =value
elif weight_type == "bias_ih_l0":
a__ =value
elif weight_type == "bias_hh_l0":
a__ =value
elif weight_type == "weight_ih_l1":
a__ =value
elif weight_type == "weight_hh_l1":
a__ =value
elif weight_type == "bias_ih_l1":
a__ =value
elif weight_type == "bias_hh_l1":
a__ =value
else:
a__ =value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def _lowercase( __a : Optional[int] , __a : Union[str, Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a__ , a__ =key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
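# Illustrative sketch (not part of the original script): the ignore patterns handled
# above come in three forms -- a trailing '.*' prefix match, an inner '.*.'
# prefix-and-suffix match, and a plain substring match. A standalone copy, with
# example key names that are assumptions, not taken from a real checkpoint:
def _matches_ignore_pattern(name, key):
    if key.endswith('.*'):
        return name.startswith(key[:-1])
    if '.*.' in key:
        prefix, suffix = key.split('.*.')
        return prefix in name and suffix in name
    return key in name
assert _matches_ignore_pattern('encoder.model.0.conv.bias', 'encoder.*')
assert _matches_ignore_pattern('decoder.model.4.block.1.conv.weight', 'decoder.*.conv')
assert not _matches_ignore_pattern('quantizer.vq.layers.0._codebook.embed', 'encoder.*')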
def _lowercase( __a : str , __a : int , __a : Tuple ):
a__ =[]
if model_name == "encodec_24khz" or "encodec_32khz":
a__ =MAPPING_24K
elif model_name == "encodec_48khz":
a__ =MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__a , __a ):
logger.info(f"""{name} was ignored""" )
continue
a__ =False
for key, mapped_key in MAPPING.items():
if "*" in key:
a__ , a__ =key.split('.*.' )
if prefix in name and suffix in name:
a__ =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "weight_ih_l0" in name:
a__ ='weight_ih_l0'
elif "weight_hh_l0" in name:
a__ ='weight_hh_l0'
elif "bias_ih_l0" in name:
a__ ='bias_ih_l0'
elif "bias_hh_l0" in name:
a__ ='bias_hh_l0'
elif "weight_ih_l1" in name:
a__ ='weight_ih_l1'
elif "weight_hh_l1" in name:
a__ ='weight_hh_l1'
elif "bias_ih_l1" in name:
a__ ='bias_ih_l1'
elif "bias_hh_l1" in name:
a__ ='bias_hh_l1'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
a__ ='weight'
elif "running_mean" in name:
a__ ='running_mean'
elif "running_var" in name:
a__ ='running_var'
elif "num_batches_tracked" in name:
a__ ='num_batches_tracked'
else:
a__ =None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def _lowercase( __a : str , __a : int , __a : str , __a : Tuple=None , __a : Optional[int]=None , ):
if config_path is not None:
a__ =EncodecConfig.from_pretrained(__a )
else:
a__ =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
a__ =[8, 5, 4, 4]
a__ =[2.2]
a__ =64
a__ =3_2000
a__ =2048
a__ =False
a__ =False
a__ =False
elif model_name == "encodec_48khz":
a__ =[8, 5, 4, 2]
a__ =[3.0, 6.0, 12.0, 24.0]
a__ =4_8000
a__ =2
a__ =False
a__ ='time_group_norm'
a__ =True
a__ =1.0
a__ =0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
a__ =EncodecModel(__a )
a__ =EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__a )
a__ =torch.load(__a )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
a__ =original_checkpoint['best_state']
recursively_load_weights(__a , __a , __a )
model.save_pretrained(__a )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(__a )
model.push_to_hub(__a )
if __name__ == "__main__":
_lowerCAmelCase: str = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowerCAmelCase: str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 20 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase: Any = sys.version_info >= (3, 10)
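# For example (a sketch, not from this file): on Python >= 3.10 the annotation
#   foo: int | None = None
# is equivalent to `foo: Optional[int] = None`, which is what the version gate
# above checks for before registering the `|`-style dataclasses below.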
def _lowercase( __a : int=None , __a : Any=None ):
return field(default_factory=lambda: default , metadata=__a )
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
        # A boolean no_* argument always has to come after its "default: True" regular counterpart
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
            a__ =parser.parse_json_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
| 20 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: Any = logging.get_logger(__name__)
def _lowercase( __a : Tuple ):
a__ =ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a__ =128
elif "12-12" in model_name:
a__ =12
a__ =12
elif "14-14" in model_name:
a__ =14
a__ =14
elif "16-16" in model_name:
a__ =16
a__ =16
else:
raise ValueError('Model not supported' )
a__ ='huggingface/label-files'
if "speech-commands" in model_name:
a__ =35
a__ ='speech-commands-v2-id2label.json'
else:
a__ =527
a__ ='audioset-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
    a__ ={int(k): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
return config
def _lowercase( __a : Any ):
if "module.v" in name:
a__ =name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
a__ =name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
a__ =name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
a__ =name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
a__ =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
a__ =name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
a__ =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a__ =name.replace('attn' , 'attention.self' )
if "norm1" in name:
a__ =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a__ =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a__ =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a__ =name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a__ =name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
a__ =name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
a__ =name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
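# Illustrative sketch of the chained replacements above (the key name is an assumed
# example of an original AST checkpoint entry):
#   'module.v.blocks.0.attn.proj.weight'
#     -> 'audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight'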
def _lowercase( __a : Any , __a : Tuple ):
for key in orig_state_dict.copy().keys():
a__ =orig_state_dict.pop(__a )
if "qkv" in key:
a__ =key.split('.' )
a__ =int(key_split[3] )
a__ =config.hidden_size
if "weight" in key:
a__ =val[:dim, :]
a__ =val[dim : dim * 2, :]
a__ =val[-dim:, :]
else:
a__ =val[:dim]
a__ =val[dim : dim * 2]
a__ =val[-dim:]
else:
a__ =val
return orig_state_dict
def _lowercase( __a : Dict ):
a__ =[
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(__a , __a )
@torch.no_grad()
def _lowercase( __a : str , __a : List[Any] , __a : Any=False ):
a__ =get_audio_spectrogram_transformer_config(__a )
a__ ={
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
a__ =model_name_to_url[model_name]
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' )
# remove some keys
remove_keys(__a )
# rename some keys
a__ =convert_state_dict(__a , __a )
# load 🤗 model
a__ =ASTForAudioClassification(__a )
model.eval()
model.load_state_dict(__a )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a__ =-4.2_67_73_93 if 'speech-commands' not in model_name else -6.84_59_78
a__ =4.5_68_99_74 if 'speech-commands' not in model_name else 5.5_65_45_26
a__ =1024 if 'speech-commands' not in model_name else 128
a__ =ASTFeatureExtractor(mean=__a , std=__a , max_length=__a )
if "speech-commands" in model_name:
a__ =load_dataset('speech_commands' , 'v0.02' , split='validation' )
a__ =dataset[0]['audio']['array']
else:
a__ =hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
a__ , a__ =torchaudio.load(__a )
a__ =waveform.squeeze().numpy()
a__ =feature_extractor(__a , sampling_rate=1_6000 , return_tensors='pt' )
# forward pass
a__ =model(**__a )
a__ =outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a__ =torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a__ =torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a__ =torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a__ =torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a__ =torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a__ =torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a__ =torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
a__ =torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , __a , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(__a )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f"""MIT/{model_name}""" )
feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
_lowerCAmelCase: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 20 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
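# Worked example (a sketch with assumed, non-default values): with input_size=1,
# a lags_sequence of length 7, two time features and one static categorical feature
# whose embedding_dimension is [5], the property above gives 5 + 0 + 2 + 0 + 1 * 2 = 9,
# so feature_size = input_size * len(lags_sequence) + 9 = 1 * 7 + 9 = 16.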
| 20 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
_lowerCAmelCase: Optional[Any] = TypeVar('T')
class lowercase_ (Generic[T] ):
def __init__( self , lowercase_) -> None:
a__ =data
a__ =self
a__ =0
class lowercase_ (Generic[T] ):
def __init__( self) -> None:
# map from node name to the node object
a__ ={}
def __UpperCamelCase ( self , lowercase_) -> None:
# create a new set with x as its member
a__ =DisjointSetTreeNode(lowercase_)
def __UpperCamelCase ( self , lowercase_) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
a__ =self.map[data]
if elem_ref != elem_ref.parent:
a__ =self.find_set(elem_ref.parent.data)
return elem_ref.parent
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
a__ =nodea
else:
a__ =nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
# merge 2 disjoint sets
self.link(self.find_set(lowercase_) , self.find_set(lowercase_))
class lowercase_ (Generic[T] ):
def __init__( self) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
a__ ={}
def __UpperCamelCase ( self , lowercase_) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
a__ ={}
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
# add an edge with the given weight
self.add_node(lowercase_)
self.add_node(lowercase_)
a__ =weight
a__ =weight
def __UpperCamelCase ( self) -> GraphUndirectedWeighted[T]:
a__ =[]
a__ =set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start))
edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
# creating the disjoint set
a__ =DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowercase_)
# MST generation
a__ =0
a__ =0
a__ =GraphUndirectedWeighted[T]()
while num_edges < len(self.connections) - 1:
a__ , a__ , a__ =edges[index]
index += 1
a__ =disjoint_set.find_set(lowercase_)
a__ =disjoint_set.find_set(lowercase_)
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowercase_ , lowercase_ , lowercase_)
disjoint_set.union(lowercase_ , lowercase_)
return graph
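# Illustrative usage (a sketch; `add_edge` matches the method defined above, while
# `kruskal` is an assumed name for the MST entry point):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 3)  # heaviest edge on the cycle; Kruskal drops it
#   mst = g.kruskal()    # returns a new GraphUndirectedWeighted with edges (1, 2) and (2, 3)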
| 20 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
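# Illustrative usage of the semantic-segmentation post-processing above (a sketch;
# the method and variable names are assumptions):
#   outputs = model(**image_processor(images, return_tensors='pt'))
#   seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 683)])
#   seg_maps[0].shape  # torch.Size([512, 683]) -- one class id per pixel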
| 20 | 1 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _lowercase( ):
a__ =10
a__ =datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
a__ =datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__a ) ),
} , features=__a , )
return dataset
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[int] , __a : Dict ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__a )
return filename
# FILE_CONTENT + files
_lowerCAmelCase: Tuple = 'Text data.\nSecond line of data.'
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] ):
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt'
a__ =FILE_CONTENT
with open(__a , 'w' ) as f:
f.write(__a )
return filename
@pytest.fixture(scope='session' )
def _lowercase( __a : List[str] ):
    import bz2
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
a__ =bytes(__a , 'utf-8' )
    with bz2.open(__a , 'wb' ) as f:
f.write(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] ):
import gzip
a__ =str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
a__ =bytes(__a , 'utf-8' )
with gzip.open(__a , 'wb' ) as f:
f.write(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Tuple ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
a__ =bytes(__a , 'utf-8' )
        with lz4.frame.open(__a , 'wb' ) as f:
f.write(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] , __a : Tuple ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(__a , 'w' ) as archive:
archive.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : str , __a : Optional[int] ):
import tarfile
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__a , 'w' ) as f:
f.add(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Union[str, Any] ):
import lzma
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
a__ =bytes(__a , 'utf-8' )
with lzma.open(__a , 'wb' ) as f:
f.write(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : str , __a : Any ):
import zipfile
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Tuple ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
a__ =tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
a__ =bytes(__a , 'utf-8' )
with zstd.open(__a , 'wb' ) as f:
f.write(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : str ):
a__ =tmp_path_factory.mktemp('data' ) / 'file.xml'
a__ =textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__a , 'w' ) as f:
f.write(__a )
return filename
_lowerCAmelCase: str = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
_lowerCAmelCase: Any = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
_lowerCAmelCase: Optional[Any] = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
_lowerCAmelCase: List[Any] = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
_lowerCAmelCase: Optional[int] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def _lowercase( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[int] ):
a__ =datasets.Dataset.from_dict(__a )
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[Any] ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__a ) ) as con:
a__ =con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__a , 'w' , newline='' ) as f:
a__ =csv.DictWriter(__a , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__a , 'w' , newline='' ) as f:
a__ =csv.DictWriter(__a , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[int] , __a : Tuple ):
    import bz2
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__a , 'rb' ) as f:
a__ =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__a , 'wb' ) as f:
f.write(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[int] , __a : Optional[Any] , __a : int ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Dict , __a : str , __a : Tuple ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__a , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Any , __a : str , __a : Union[str, Any] ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.join('main_dir' , os.path.basename(__a ) ) )
f.write(__a , arcname=os.path.join('main_dir' , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
a__ =pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__a , 'wb' ) as f:
a__ =pq.ParquetWriter(__a , schema=__a )
a__ =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__a ) )] for k in DATA[0]} , schema=__a )
writer.write_table(__a )
writer.close()
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Dict ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
a__ ={'data': DATA}
with open(__a , 'w' ) as f:
json.dump(__a , __a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : str ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
a__ ={'data': DATA_DICT_OF_LISTS}
with open(__a , 'w' ) as f:
json.dump(__a , __a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[Any] ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__a , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__a ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[Any] ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__a , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__a ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Tuple ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__a , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__a ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Dict ):
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__a , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__a ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[str] , __a : Union[str, Any] ):
import gzip
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__a , 'rb' ) as orig_file:
with gzip.open(__a , 'wb' ) as zipped_file:
zipped_file.writelines(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[Any] , __a : Tuple ):
import gzip
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__a , 'rb' ) as orig_file:
with gzip.open(__a , 'wb' ) as zipped_file:
zipped_file.writelines(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[int] , __a : Optional[int] , __a : List[Any] ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Any , __a : Optional[Any] , __a : Tuple , __a : Dict ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.join('nested' , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[int] , __a : Optional[int] , __a : Union[str, Any] ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.join('main_dir' , os.path.basename(__a ) ) )
f.write(__a , arcname=os.path.join('main_dir' , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[Any] , __a : Union[str, Any] , __a : Any ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__a , 'w' ) as f:
f.add(__a , arcname=os.path.basename(__a ) )
f.add(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : int , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__a , 'w' ) as f:
f.add(__a , arcname=os.path.join('nested' , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Any ):
a__ =['0', '1', '2', '3']
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__a , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Any ):
a__ =['0', '1', '2', '3']
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__a , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Any ):
a__ =['0', '1', '2', '3']
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__a , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[str] , __a : Optional[Any] , __a : Dict ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : int , __a : Union[str, Any] , __a : Optional[int] ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.join('main_dir' , os.path.basename(__a ) ) )
f.write(__a , arcname=os.path.join('main_dir' , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.basename('unsupported.ext' ) )
f.write(__a , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : Dict ):
a__ ='\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
a__ =str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(__a )
return path
@pytest.fixture(scope='session' )
def _lowercase( ):
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _lowercase( ):
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _lowercase( __a : Optional[Any] , __a : Optional[Any] ):
a__ =tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__a , 'w' ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _lowercase( __a : List[Any] ):
a__ =tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
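# Illustrative sketch of how a test consumes one of these session fixtures
# (the test body is an assumption, not part of this conftest):
#   def test_csv_loading(csv_path):
#       ds = datasets.load_dataset('csv', data_files=csv_path, split='train')
#       assert ds.num_rows == 4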
| 20 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
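# Illustrative usage (a sketch; `patch_submodule` is the assumed public name of the
# context manager above):
#   with patch_submodule(some_module, 'os.path.join', lambda *p: '/'.join(p)):
#       some_module.build_path('a', 'b')  # resolves through the patched join
#   # on __exit__ the original attribute -- including aliases created via
#   # `from os.path import join as pjoin` -- is restored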
| 20 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ (lowercase__ ):
snake_case ='ClapFeatureExtractor'
snake_case =('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , lowercase_ , lowercase_) -> List[str]:
super().__init__(lowercase_ , lowercase_)
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_) -> Optional[Any]:
a__ =kwargs.pop('sampling_rate' , lowercase_)
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.')
if text is not None:
a__ =self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_)
if audios is not None:
a__ =self.feature_extractor(
lowercase_ , sampling_rate=lowercase_ , return_tensors=lowercase_ , **lowercase_)
if text is not None and audios is not None:
a__ =audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_) , tensor_type=lowercase_)
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> Dict:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> Tuple:
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.tokenizer.model_input_names
a__ =self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
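# Illustrative usage (a sketch; the checkpoint id is an assumption):
#   processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
#   inputs = processor(text=['a dog barking'], audios=waveform,
#                      sampling_rate=48000, return_tensors='pt')
#   # yields both the tokenizer outputs and the audio `input_features`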
| 20 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowercase( __a : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
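# Example invocation (script name and paths are placeholders):
#
#     python convert_original_controlnet_to_diffusers.py \
#         --checkpoint_path ./control_sd15_canny.pth \
#         --original_config_file ./cldm_v15.yaml \
#         --dump_path ./controlnet-canny \
#         --to_safetensors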
| 20 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _lowercase( __a : Optional[int] , __a : Optional[Any] ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a__ =flax_key_tuple[:-1] + ('weight',)
a__ =torch.permute(__a , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__a ):
# linear layer
a__ =flax_key_tuple[:-1] + ('weight',)
a__ =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a__ =flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
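# Worked example of the renaming above (shapes illustrative): a 2-D linear kernel at
# ('encoder', 'block_0', 'mlp', 'wo', 'kernel') becomes
# ('encoder', 'block_0', 'mlp', 'wo', 'weight') with the tensor transposed, while a
# 3-D expert kernel gets the same key rename but is permuted with
# torch.permute(tensor, (0, 2, 1)) instead.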
def _lowercase( __a : Optional[Any] , __a : List[str] , __a : List[Any] ):
if "metadata" in layer:
a__ =layer.split('metadata' )
a__ =''.join(split_layer[0] )[:-1]
a__ =[tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
a__ =layer.split('kvstore' )
a__ =''.join(split_layer[0] )[:-1]
a__ =[tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
a__ =layer.split('/' )
a__ ='/'.join(split_layer[:-1] )
a__ =(split_layer[-1],)
if "kvstore/path" in layer:
a__ =f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
a__ ='file'
else:
a__ =checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _lowercase( __a : Tuple , __a : List[str] ):
a__ =rename_keys(__a )
a__ ={}
for k, v in current_block.items():
a__ =v
a__ =new_current_block
torch.save(__a , __a )
def _lowercase( __a : List[str] , __a : List[str] , __a : int , __a : Any , __a : str = WEIGHTS_NAME ):
a__ =convert_file_size_to_int(__a )
a__ =[]
a__ ={}
a__ =0
a__ =0
os.makedirs(__a , exist_ok=__a )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
a__ =serialization.msgpack_restore(fp.read() )['optimizer']['target']
a__ =flatten_dict(__a , sep='/' )
a__ ={}
for layer in checkpoint_info.keys():
a__ , a__ , a__ =get_key_and_tensorstore_dict(
__a , __a , __a )
if curr_real_layer_name in all_layers:
a__ =content
else:
a__ ={split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a__ =ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a__ =torch.tensor(__a )
a__ =raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a__ , a__ =rename_base_flax_keys(tuple(key.split('/' ) ) , __a )
a__ ='/'.join(__a )
        # If this weight is going to tip us over the maximal shard size, we split.
if current_block_size + weight_size > max_shard_size:
a__ =os.path.join(
__a , weights_name.replace('.bin' , f"""-{len(__a )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__a , __a )
sharded_state_dicts.append(current_block.keys() )
del current_block
a__ ={}
a__ =0
a__ =raw_weights.to(getattr(__a , __a ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a__ =os.path.join(__a , weights_name.replace('.bin' , f"""-{len(__a )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__a , __a )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__a ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a__ ={}
a__ ={}
for idx, shard in enumerate(__a ):
a__ =weights_name.replace(
'.bin' , f"""-{idx+1:05d}-of-{len(__a ):05d}.bin""" ) # len(sharded_state_dicts):05d}
a__ =os.path.join(__a , weights_name.replace('.bin' , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__a , os.path.join(__a , __a ) )
a__ =shard
for key in shard:
a__ =shard_file
# Add the metadata
a__ ={'total_size': total_size}
a__ ={'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__a , __a ) , 'w' , encoding='utf-8' ) as f:
a__ =json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
return metadata, index
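# The index built above follows the standard sharded-checkpoint layout, e.g.
# (names and sizes illustrative):
#
#     {
#       "metadata": {"total_size": 26945000000},
#       "weight_map": {"shared.weight": "pytorch_model-00001-of-00072.bin", ...}
#     }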
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _lowercase( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a__ =SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
a__ =SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
a__ =TaTokenizer.from_pretrained('t5-small' )
    a__ ='A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
a__ =tokenizer(__a , return_tensors='pt' ).input_ids
a__ =model.generate(__a , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 20 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase: Tuple = get_logger(__name__)
_lowerCAmelCase: List[str] = Path(__file__).parent / 'model_card_template.md'
_lowerCAmelCase: Any = uuida().hex
_lowerCAmelCase: List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowercase( __a : Union[Dict, str, None] = None ):
a__ =f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
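# Example of the string this builds (version numbers illustrative):
#     "diffusers/0.16.1; python/3.10.11; session_id/1f0e...; torch/2.0.1; is_ci/true"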
def _lowercase( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ):
if token is None:
a__ =HfFolder.get_token()
if organization is None:
a__ =whoami(__a )['name']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) ,
        template_path=__a ,
        model_name=__a ,
        repo_name=__a ,
        dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(__a , 'adam_beta1' ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(__a , 'adam_beta2' ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None ,
        ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None ,
        mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def _lowercase( __a : Optional[str] , __a : Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
a__ =str(Path(__a ).as_posix() )
a__ =re.search(r'snapshots/([^/]+)/' , __a )
if search is None:
return None
a__ =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
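# Example: a resolved path such as
#     .../diffusers/models--foo--bar/snapshots/<40-hex-hash>/unet/config.json
# yields the segment between 'snapshots/' and the next '/', and it is kept only
# if it matches REGEX_COMMIT_HASH.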
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase: List[str] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_lowerCAmelCase: List[str] = os.path.join(hf_cache_home, 'diffusers')
def _lowercase( __a : Optional[str] = None , __a : Optional[str] = None ):
if new_cache_dir is None:
a__ =DIFFUSERS_CACHE
if old_cache_dir is None:
a__ =old_diffusers_cache
a__ =Path(__a ).expanduser()
a__ =Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a__ =new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase: Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase: int = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase: List[Any] = int(f.read())
except ValueError:
_lowerCAmelCase: Any = 0
if cache_version < 1:
_lowerCAmelCase: str = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase: Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _lowercase( __a : str , __a : Optional[str] = None ):
if variant is not None:
a__ =weights_name.split('.' )
a__ =splits[:-1] + [variant] + splits[-1:]
a__ ='.'.join(__a )
return weights_name
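# Example: _add_variant('diffusion_pytorch_model.bin', 'fp16')
#          -> 'diffusion_pytorch_model.fp16.bin'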
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
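# A minimal sketch of how these two classes are used (the un-obfuscated names
# BigBirdConfig / BigBirdOnnxConfig are assumptions based on the mapping above):
#
#     config = BigBirdConfig(attention_type='block_sparse', block_size=64, num_random_blocks=3)
#     onnx_config = BigBirdOnnxConfig(config, task='multiple-choice')
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])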
| 20 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def _lowercase( __a : int , __a : str , __a : LevitConfig , __a : Path , __a : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _lowercase( __a : Path , __a : str = None , __a : bool = True ):
a__ ='imagenet-1k-id2label.json'
a__ =1000
a__ =(1, num_labels)
a__ ='huggingface/label-files'
a__ =num_labels
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
a__ =partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
a__ ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
    help='The name of the model you wish to convert, it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
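# Example invocation (script name is a placeholder; omitting --model_name converts
# and verifies every size in `names_to_config`):
#
#     python convert_levit_timm_to_pytorch.py --model_name levit-256 \
#         --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub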
| 20 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def _lowercase( __a : Dict , __a : Union[str, Any] , __a : List[str] ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , __a )
a__ =datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
a__ =dataset_size < in_memory_max_size
else:
a__ =False
a__ =is_small_dataset(__a )
assert result == expected
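# Worked example of the parametrization above: dataset_size = 400 * 2**20 with
# IN_MEMORY_MAX_SIZE = 900 * 2**20 counts as "small" (400 MiB < 900 MiB -> True),
# while in_memory_max_size = 0 (the default) disables the check entirely and the
# expected result is False.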
| 20 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowerCAmelCase: int = logging.get_logger(__name__)
_lowerCAmelCase: Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_lowerCAmelCase: Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase( __a : Optional[Any] ):
a__ ={}
with open(__a , 'r' ) as file:
for line_number, line in enumerate(__a ):
a__ =line.strip()
if line:
a__ =line.split()
a__ =line_number
a__ =words[0]
a__ =value
return result
def _lowercase( __a : Dict , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : str ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowercase( __a : Optional[int] , __a : int , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] ):
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ ='.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ ='.'.join([key, hf_param_name] )
else:
a__ =key
a__ =value if 'lm_head' in full_key else value[0]
_lowerCAmelCase: Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase( __a : Dict , __a : int , __a : int=None , __a : List[str]=None ):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
return is_used
return is_used
def _lowercase( __a : Union[str, Any] , __a : List[str] , __a : Dict ):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
a__ =load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _lowercase( __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] ):
a__ =full_name.split('conv_layers.' )[-1]
a__ =name.split('.' )
a__ =int(items[0] )
a__ =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def _lowercase( __a : str , __a : str , __a : Any=None , __a : str=None , __a : Any=True , __a : Union[str, Any]=False ):
if config_path is not None:
a__ =WavaVecaConfig.from_pretrained(__a )
else:
a__ =WavaVecaConfig()
if is_seq_class:
a__ =read_txt_into_dict(__a )
a__ =idalabel
a__ =WavaVecaForSequenceClassification(__a )
a__ =WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaForCTC(__a )
else:
a__ =WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_lowerCAmelCase: Tuple = parser.parse_args()
_lowerCAmelCase: Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
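# Example invocations (script name and paths are placeholders):
#
#     # fine-tuned CTC checkpoint with its fairseq dictionary
#     python convert_wav2vec2.py --checkpoint_path wav2vec_small_960h.pt \
#         --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./wav2vec2-base-960h
#
#     # pretrained-only checkpoint
#     python convert_wav2vec2.py --checkpoint_path wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned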
| 20 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 20 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
a__ =-(labels.shape[-1] * loss.item())
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
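# The check above works because the returned loss is the mean cross-entropy per
# target token, so -(labels.shape[-1] * loss) is the total log-likelihood of the
# target sequence; presumably the hard-coded EXPECTED_SCORE was computed the same
# way with the reference implementation.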
| 20 | 1 |
import argparse
from collections import defaultdict
import yaml
_lowerCAmelCase: int = 'docs/source/en/_toctree.yml'
def _lowercase( __a : Union[str, Any] ):
a__ =defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
a__ =[key for key, value in counts.items() if value > 1]
a__ =[]
for duplicate_key in duplicates:
a__ =list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__a , key=lambda __a : s["title"].lower() )
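# Worked example: two entries sharing {'local': 'model_doc/bert'} with the same
# title collapse into one; if the duplicated key carried two *different* titles,
# the ValueError above fires instead. Surviving entries are sorted
# case-insensitively by title.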
def _lowercase( __a : List[Any]=False ):
with open(__a , encoding='utf-8' ) as f:
a__ =yaml.safe_load(f.read() )
# Get to the API doc
a__ =0
while content[api_idx]["title"] != "API":
api_idx += 1
a__ =content[api_idx]['sections']
# Then to the model doc
a__ =0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
a__ =api_doc[model_idx]['sections']
a__ =[(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
a__ =False
for idx, modality_doc in modalities_docs:
a__ =modality_doc['sections']
a__ =clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
a__ =True
if overwrite:
a__ =new_modality_doc
if diff:
if overwrite:
a__ =model_doc
a__ =api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
            'The model doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_lowerCAmelCase: List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCAmelCase: List[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 20 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> int:
a__ =tempfile.mkdtemp()
a__ =BlipImageProcessor()
a__ =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
a__ =BlipProcessor(lowercase_ , lowercase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer
def __UpperCamelCase ( self , **lowercase_) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor
def __UpperCamelCase ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> str:
a__ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 1 |
import random
class lowercase_ :
@staticmethod
def __UpperCamelCase ( lowercase_) -> tuple[list[int], list[int]]:
a__ =[ord(lowercase_) for i in text]
a__ =[]
a__ =[]
for i in plain:
a__ =random.randint(1 , 300)
a__ =(i + k) * k
cipher.append(lowercase_)
key.append(lowercase_)
return cipher, key
@staticmethod
def __UpperCamelCase ( lowercase_ , lowercase_) -> str:
a__ =[]
for i in range(len(lowercase_)):
a__ =int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(lowercase_))
return "".join(lowercase_)
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase: Any = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
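# Worked example of the intended per-character arithmetic: for 'H' (ord 72) and a
# drawn key k = 5, encryption stores (72 + 5) * 5 = 385; decryption recovers
# int((385 - 5**2) / 5) = 72 -> 'H'.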
| 20 |
def _lowercase( __a : list[int] ):
a__ =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
a__ , a__ =numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
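# Worked example: [3, 1, 2] -> swap 1 and 3 -> [1, 3, 2] -> swap 3 and 2
# -> [1, 2, 3]. The nested loops give O(n^2) comparisons, like selection sort
# but swapping eagerly on every out-of-order pair.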
| 20 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=2 , lowercase_=True , lowercase_=False , lowercase_=10 , lowercase_=3 , lowercase_=32 * 8 , lowercase_=32 * 8 , lowercase_=4 , lowercase_=64 , ) -> Union[str, Any]:
a__ =parent
a__ =batch_size
a__ =is_training
a__ =use_auxiliary_loss
a__ =num_queries
a__ =num_channels
a__ =min_size
a__ =max_size
a__ =num_labels
a__ =hidden_dim
a__ =hidden_dim
def __UpperCamelCase ( self) -> int:
a__ =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
lowercase_)
a__ =torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_)
a__ =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_) > 0.5
).float()
a__ =(torch.rand((self.batch_size, self.num_labels) , device=lowercase_) > 0.5).long()
a__ =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __UpperCamelCase ( self) -> List[Any]:
a__ =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
a__ =self.num_queries
a__ =self.num_labels
a__ =[1, 1, 1, 1]
a__ =self.num_channels
a__ =64
a__ =128
a__ =self.hidden_dim
a__ =self.hidden_dim
a__ =self.hidden_dim
return config
def __UpperCamelCase ( self) -> str:
a__ , a__ , a__ , a__ , a__ =self.prepare_config_and_inputs()
a__ ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Union[str, Any]:
a__ =output.encoder_hidden_states
a__ =output.pixel_decoder_hidden_states
a__ =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowercase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowercase_) , config.decoder_layers)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False) -> Any:
with torch.no_grad():
a__ =MaskaFormerModel(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(pixel_values=lowercase_ , pixel_mask=lowercase_)
a__ =model(lowercase_ , output_hidden_states=lowercase_)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =MaskaFormerForUniversalSegmentation(config=lowercase_)
model.to(lowercase_)
model.eval()
def comm_check_on_output(lowercase_):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
a__ =model(pixel_values=lowercase_ , pixel_mask=lowercase_)
a__ =model(lowercase_)
comm_check_on_output(lowercase_)
a__ =model(
pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_)
comm_check_on_output(lowercase_)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case ={'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[int]:
a__ =MaskaFormerModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowercase_)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def __UpperCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def __UpperCamelCase ( self) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> str:
pass
def __UpperCamelCase ( self) -> Optional[Any]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
@slow
def __UpperCamelCase ( self) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
a__ =MaskaFormerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =(self.model_tester.min_size,) * 2
a__ ={
'pixel_values': torch.randn((2, 3, *size) , device=lowercase_),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase_),
'class_labels': torch.zeros(2 , 10 , device=lowercase_).long(),
}
a__ =self.model_tester.get_config()
a__ =MaskaFormerForUniversalSegmentation(lowercase_).to(lowercase_)
a__ =model(**lowercase_)
self.assertTrue(outputs.loss is not None)
def __UpperCamelCase ( self) -> Tuple:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_)
def __UpperCamelCase ( self) -> int:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_).to(lowercase_)
a__ =model(**lowercase_ , output_attentions=lowercase_)
self.assertTrue(outputs.attentions is not None)
def __UpperCamelCase ( self) -> Union[str, Any]:
if not self.model_tester.is_training:
return
a__ =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
a__ =model_class(lowercase_)
model.to(lowercase_)
model.train()
a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_).loss
loss.backward()
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
a__ =True
a__ =True
a__ =model_class(lowercase_).to(lowercase_)
model.train()
a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_)
a__ =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a__ =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
a__ =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a__ =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
_lowerCAmelCase: float = 1e-4
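# Loads the COCO sample image used by the slow integration tests below.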
def _lowercase( ):
a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
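# Slow integration tests: run a real checkpoint and compare outputs against reference tensors.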
@require_vision
@slow
class lowercase_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self) -> Tuple:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __UpperCamelCase ( self) -> Optional[Any]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def __UpperCamelCase ( self) -> str:
a__ =MaskaFormerModel.from_pretrained(self.model_checkpoints).to(lowercase_)
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_)
a__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase_ , (1, 3, 384, 384))
with torch.no_grad():
a__ =model(**lowercase_)
a__ =torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
a__ =torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
a__ =torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_))
def __UpperCamelCase ( self) -> Any:
a__ =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase_).eval()
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_)
a__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase_ , (1, 3, 384, 384))
with torch.no_grad():
a__ =model(**lowercase_)
# masks_queries_logits
a__ =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
a__ =[
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
a__ =torch.tensor(lowercase_).to(lowercase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
# class_queries_logits
a__ =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
a__ =torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_))
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase_).eval()
a__ =self.default_image_processor
a__ =image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='pt' , )
a__ =inputs['pixel_values'].to(lowercase_)
a__ =[el.to(lowercase_) for el in inputs['mask_labels']]
a__ =[el.to(lowercase_) for el in inputs['class_labels']]
with torch.no_grad():
a__ =model(**lowercase_)
self.assertTrue(outputs.loss is not None)
| 20 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase: List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
_lowerCAmelCase: List[Any] = {'target_lang': 'fi', 'source_lang': 'en'}
_lowerCAmelCase: int = '>>zh<<'
_lowerCAmelCase: int = 'Helsinki-NLP/'
if is_torch_available():
_lowerCAmelCase: str = 'pt'
elif is_tf_available():
_lowerCAmelCase: Optional[int] = 'tf'
else:
_lowerCAmelCase: List[Any] = 'jax'
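# Unit tests for the SentencePiece-based Marian tokenizer.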
@require_sentencepiece
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =MarianTokenizer
snake_case =False
snake_case =True
def __UpperCamelCase ( self) -> Dict:
super().setUp()
a__ =['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
a__ =dict(zip(lowercase_ , range(len(lowercase_))))
a__ =Path(self.tmpdirname)
save_json(lowercase_ , save_dir / VOCAB_FILES_NAMES['vocab'])
save_json(lowercase_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase_ , save_dir / VOCAB_FILES_NAMES['source_spm'])
copyfile(lowercase_ , save_dir / VOCAB_FILES_NAMES['target_spm'])
a__ =MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def __UpperCamelCase ( self , lowercase_) -> int:
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ ='</s>'
a__ =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '</s>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '<pad>')
self.assertEqual(len(lowercase_) , 9)
def __UpperCamelCase ( self) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 9)
def __UpperCamelCase ( self) -> Any:
a__ =MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""")
a__ =en_de_tokenizer(['I am a small frog'] , return_tensors=lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
a__ =[38, 121, 14, 697, 38848, 0]
self.assertListEqual(lowercase_ , batch.input_ids[0])
a__ =tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase_)
a__ =[x.name for x in Path(lowercase_).glob('*')]
self.assertIn('source.spm' , lowercase_)
MarianTokenizer.from_pretrained(lowercase_)
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_tokenizer()
a__ =tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=lowercase_ , truncation=lowercase_ , return_tensors=lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
self.assertEqual(batch.input_ids.shape , (2, 512))
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_tokenizer()
a__ =tok(['I am a tiny frog', 'I am a small frog'] , padding=lowercase_ , return_tensors=lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
self.assertEqual(batch_smaller.input_ids.shape , (2, 10))
@slow
def __UpperCamelCase ( self) -> Dict:
# fmt: off
a__ ={'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def __UpperCamelCase ( self) -> List[Any]:
a__ =MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')
a__ ='Tämä on testi'
a__ ='This is a test'
a__ =[76, 7, 2047, 2]
a__ =[69, 12, 11, 940, 2]
a__ =tokenizer(lowercase_).input_ids
self.assertListEqual(lowercase_ , lowercase_)
a__ =tokenizer(text_target=lowercase_).input_ids
self.assertListEqual(lowercase_ , lowercase_)
a__ =tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_)
self.assertEqual(lowercase_ , lowercase_)
| 20 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
_lowerCAmelCase: Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
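# Copies a single source tensor into the matching attribute of the HF model, checking shapes first.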
def _lowercase( __a : Any , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : int ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
if weight_type is not None:
a__ =getattr(__a , __a ).shape
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "running_mean":
a__ =value
elif weight_type == "running_var":
a__ =value
elif weight_type == "num_batches_tracked":
a__ =value
elif weight_type == "inv_freq":
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
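# Maps every entry of the fairseq state dict onto the HF Wav2Vec2-Conformer model via MAPPING.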
def _lowercase( __a : Optional[Any] , __a : Tuple , __a : List[str] ):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "pos_bias_u" in name:
a__ =None
elif "pos_bias_v" in name:
a__ =None
elif "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
elif "running_mean" in name:
a__ ='running_mean'
elif "inv_freq" in name:
a__ ='inv_freq'
elif "running_var" in name:
a__ ='running_var'
elif "num_batches_tracked" in name:
a__ ='num_batches_tracked'
else:
a__ =None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
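# Loads one convolutional feature-extractor layer, dispatching on the parsed layer and type ids.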
def _lowercase( __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : List[str] , __a : int ):
a__ =full_name.split('conv_layers.' )[-1]
a__ =name.split('.' )
a__ =int(items[0] )
a__ =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
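# Converts a fairseq Wav2Vec2-Conformer checkpoint (optionally fine-tuned for CTC) to the HF format.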
@torch.no_grad()
def _lowercase( __a : List[Any] , __a : str , __a : Dict=None , __a : Optional[int]=None , __a : Union[str, Any]=True ):
if config_path is not None:
a__ =WavaVecaConformerConfig.from_pretrained(__a , hidden_act='swish' )
else:
a__ =WavaVecaConformerConfig()
if "rope" in checkpoint_path:
a__ ='rotary'
if is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
            # important: change the bos & pad token ids, since the CTC symbol is
            # <pad> and not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaConformerForCTC(__a )
else:
a__ =WavaVecaConformerForPreTraining(__a )
if is_finetuned:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_lowerCAmelCase: int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 20 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
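# BigBird model configuration and its ONNX export configuration.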
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 | 1 |
from math import factorial
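# Binomial probability mass function: C(trials, successes) * prob**successes * (1 - prob)**(trials - successes).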
def _lowercase( __a : int , __a : int , __a : float ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__a , __a ) or not isinstance(__a , __a ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
a__ =(prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
a__ =float(factorial(__a ) )
coefficient /= factorial(__a ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
    print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
| 20 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def _lowercase( ):
a__ ='http://images.cocodataset.org/val2017/000000039769.jpg'
a__ =Image.open(requests.get(__a , stream=__a ).raw )
return im
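# Reference logits (first five entries) per SwiftFormer variant, used to validate the conversion.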
def _lowercase( __a : Optional[Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _lowercase( __a : int , __a : int , __a : Optional[Any] ):
a__ =dct.pop(__a )
a__ =val
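# Builds the (old key, new key) renaming table for the original SwiftFormer state dict.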
def _lowercase( __a : Optional[Any] ):
a__ =[]
for k in state_dict.keys():
a__ =k
if ".pwconv" in k:
a__ =k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a__ =k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a__ =k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a__ =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a__ =k_new.split('.' )
if ls[2].isdigit():
a__ ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a__ =k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
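# Converts an original SwiftFormer checkpoint, checks its logits against the references above, and saves it.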
@torch.no_grad()
def _lowercase( __a : Union[str, Any] , __a : int , __a : str ):
a__ =SwiftFormerConfig()
    # label mapping: these checkpoints are fine-tuned on ImageNet-1k (1000 classes)
a__ =1000
a__ ='huggingface/label-files'
a__ ='imagenet-1k-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ =[3, 3, 6, 4]
a__ =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a__ =[3, 3, 9, 6]
a__ =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a__ =[4, 3, 10, 5]
a__ =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a__ =[4, 4, 12, 6]
a__ =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' , check_hash=__a )
else:
a__ =torch.load(__a , map_location='cpu' )
a__ =checkpoint
a__ =create_rename_keys(__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a , __a , __a )
# load HuggingFace model
a__ =SwiftFormerForImageClassification(__a ).eval()
hf_model.load_state_dict(__a )
# prepare test inputs
a__ =prepare_img()
a__ =ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ =processor(images=__a , return_tensors='pt' )
# compare outputs from both models
a__ =get_expected_output(__a )
a__ =hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __a , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_lowerCAmelCase: Dict = True
except ImportError:
_lowerCAmelCase: Tuple = False
try:
from torch.hub import _get_torch_home
_lowerCAmelCase: Any = _get_torch_home()
except ImportError:
_lowerCAmelCase: int = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
_lowerCAmelCase: Optional[int] = os.path.join(torch_cache_home, 'transformers')
_lowerCAmelCase: List[Any] = 'https://cdn.huggingface.co'
_lowerCAmelCase: List[str] = 'https://s3.amazonaws.com/models.huggingface.co/bert'
_lowerCAmelCase: int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
_lowerCAmelCase: str = os.path.join(PATH, 'config.yaml')
_lowerCAmelCase: Tuple = os.path.join(PATH, 'attributes.txt')
_lowerCAmelCase: Dict = os.path.join(PATH, 'objects.txt')
_lowerCAmelCase: int = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
_lowerCAmelCase: Union[str, Any] = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
_lowerCAmelCase: List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
_lowerCAmelCase: Union[str, Any] = 'pytorch_model.bin'
_lowerCAmelCase: Dict = 'config.yaml'
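# Reads the Visual Genome object and attribute class names from the bundled text files.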
def _lowercase( __a : List[Any]=OBJECTS , __a : List[str]=ATTRIBUTES ):
a__ =[]
with open(__a ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
a__ =[]
with open(__a ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
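# Loads a pickled checkpoint and converts every numpy array in it to a torch tensor.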
def _lowercase( __a : Dict ):
a__ =OrderedDict()
with open(__a , 'rb' ) as f:
a__ =pkl.load(__a )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
a__ =ckp.pop(__a )
if isinstance(__a , np.ndarray ):
a__ =torch.tensor(__a )
else:
            assert isinstance(__a , torch.Tensor ), type(__a )
a__ =v
return r
class lowercase_ :
snake_case ={}
def __init__( self , lowercase_ , lowercase_ = "root" , lowercase_=0) -> Any:
a__ =name
a__ =level
a__ ={}
for k, v in dictionary.items():
if v is None:
raise ValueError()
a__ =copy.deepcopy(lowercase_)
a__ =copy.deepcopy(lowercase_)
if isinstance(lowercase_ , lowercase_):
a__ =Config(lowercase_ , name=lowercase_ , level=level + 1)
a__ =v
setattr(self , lowercase_ , lowercase_)
a__ =d
def __repr__( self) -> Optional[Any]:
return str(list((self._pointer.keys())))
def __setattr__( self , lowercase_ , lowercase_) -> Dict:
a__ =val
a__ =val
a__ =key.split('.')
a__ =len(lowercase_) - 1
a__ =self._pointer
if len(lowercase_) > 1:
for i, l in enumerate(lowercase_):
if hasattr(self , lowercase_) and isinstance(getattr(self , lowercase_) , lowercase_):
setattr(getattr(self , lowercase_) , '.'.join(levels[i:]) , lowercase_)
if l == last_level:
a__ =val
else:
a__ =pointer[l]
def __UpperCamelCase ( self) -> List[str]:
return self._pointer
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Any:
with open(F"""{file_name}""" , 'w') as stream:
dump(lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Union[str, Any]:
with open(F"""{file_name}""" , 'w') as stream:
json.dump(lowercase_ , lowercase_)
@staticmethod
def __UpperCamelCase ( lowercase_) -> Any:
with open(lowercase_) as stream:
a__ =load(lowercase_ , Loader=lowercase_)
return data
def __str__( self) -> Dict:
a__ =' '
if self._name != "root":
a__ =F"""{t * (self._level-1)}{self._name}:\n"""
else:
a__ =''
a__ =self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(lowercase_ , lowercase_):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(lowercase_).__name__})\n"""
a__ =level
return r[:-1]
@classmethod
def __UpperCamelCase ( cls , lowercase_ , **lowercase_) -> Union[str, Any]:
a__ , a__ =cls.get_config_dict(lowercase_ , **lowercase_)
return cls(lowercase_)
@classmethod
def __UpperCamelCase ( cls , lowercase_ , **lowercase_) -> Tuple:
a__ =kwargs.pop('cache_dir' , lowercase_)
a__ =kwargs.pop('force_download' , lowercase_)
a__ =kwargs.pop('resume_download' , lowercase_)
a__ =kwargs.pop('proxies' , lowercase_)
a__ =kwargs.pop('local_files_only' , lowercase_)
if os.path.isdir(lowercase_):
a__ =os.path.join(lowercase_ , lowercase_)
elif os.path.isfile(lowercase_) or is_remote_url(lowercase_):
a__ =pretrained_model_name_or_path
else:
a__ =hf_bucket_url(lowercase_ , filename=lowercase_ , use_cdn=lowercase_)
try:
# Load from URL or cache if already cached
a__ =cached_path(
lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , proxies=lowercase_ , resume_download=lowercase_ , local_files_only=lowercase_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
a__ =Config.load_yaml(lowercase_)
except EnvironmentError:
a__ ='Can\'t load config for'
raise EnvironmentError(lowercase_)
if resolved_config_file == config_file:
print('loading configuration file from path')
else:
print('loading configuration file cache')
return Config.load_yaml(lowercase_), kwargs
def _lowercase( __a : List[str] ):
a__ =torch.load('dump.pt' , map_location=in_tensor.device )
a__ =in_tensor.numpy()
a__ =out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__a , __a , rtol=0.01 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(__a , __a , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging Face helper functions below
def _lowercase( __a : Union[str, Any] ):
a__ =urlparse(__a )
return parsed.scheme in ("http", "https")
def _lowercase( __a : str , __a : str , __a : Any=True ):
a__ =CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
a__ ='/' not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
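# Streams an HTTP download (optionally resuming from resume_size) into temp_file with a progress bar.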
def _lowercase( __a : Optional[int] , __a : Dict , __a : str=None , __a : List[str]=0 , __a : Optional[Any]=None , ):
a__ ='python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__a , __a ):
ua += "; " + "; ".join('{}/{}'.format(__a , __a ) for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
a__ ={'user-agent': ua}
if resume_size > 0:
a__ ='bytes=%d-' % (resume_size,)
a__ =requests.get(__a , stream=__a , proxies=__a , headers=__a )
if response.status_code == 416: # Range not satisfiable
return
a__ =response.headers.get('Content-Length' )
a__ =resume_size + int(__a ) if content_length is not None else None
a__ =tqdm(
unit='B' , unit_scale=__a , total=__a , initial=__a , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__a ) )
temp_file.write(__a )
progress.close()
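# Downloads a URL into the cache directory (ETag-aware naming, file locking) and returns the local path.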
def _lowercase( __a : Optional[int] , __a : int=None , __a : Optional[int]=False , __a : str=None , __a : Optional[Any]=10 , __a : Optional[Any]=False , __a : Optional[Any]=None , __a : int=False , ):
if cache_dir is None:
a__ =TRANSFORMERS_CACHE
if isinstance(__a , __a ):
a__ =str(__a )
os.makedirs(__a , exist_ok=__a )
a__ =None
if not local_files_only:
try:
a__ =requests.head(__a , allow_redirects=__a , proxies=__a , timeout=__a )
if response.status_code == 200:
a__ =response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
a__ =url_to_filename(__a , __a )
# get cache path to put the file
a__ =os.path.join(__a , __a )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__a ):
return cache_path
else:
a__ =[
file
for file in fnmatch.filter(os.listdir(__a ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(__a ) > 0:
return os.path.join(__a , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(__a ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
a__ =cache_path + '.lock'
with FileLock(__a ):
# If the download just completed while the lock was activated.
if os.path.exists(__a ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
a__ =cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(__a , 'a+b' ) as f:
yield f
a__ =_resumable_file_manager
if os.path.exists(__a ):
a__ =os.stat(__a ).st_size
else:
a__ =0
else:
a__ =partial(tempfile.NamedTemporaryFile , dir=__a , delete=__a )
a__ =0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , __a , temp_file.name , )
http_get(
__a , __a , proxies=__a , resume_size=__a , user_agent=__a , )
os.replace(temp_file.name , __a )
a__ ={'url': url, 'etag': etag}
a__ =cache_path + '.json'
with open(__a , 'w' ) as meta_file:
json.dump(__a , __a )
return cache_path
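# Derives a deterministic cache filename by hashing the URL (and the ETag, when available).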
def _lowercase( __a : Dict , __a : List[Any]=None ):
a__ =url.encode('utf-8' )
a__ =shaaaa(__a )
a__ =url_hash.hexdigest()
if etag:
a__ =etag.encode('utf-8' )
a__ =shaaaa(__a )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def _lowercase( __a : int , __a : int=None , __a : str=False , __a : List[str]=None , __a : Dict=False , __a : Union[str, Any]=None , __a : Any=False , __a : List[Any]=False , __a : Optional[Any]=False , ):
if cache_dir is None:
a__ =TRANSFORMERS_CACHE
if isinstance(__a , __a ):
a__ =str(__a )
if isinstance(__a , __a ):
a__ =str(__a )
if is_remote_url(__a ):
# URL, so get it from the cache (downloading if necessary)
a__ =get_from_cache(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , user_agent=__a , local_files_only=__a , )
elif os.path.exists(__a ):
# File, and it exists.
a__ =url_or_filename
elif urlparse(__a ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(__a ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(__a ) )
if extract_compressed_file:
if not is_zipfile(__a ) and not tarfile.is_tarfile(__a ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
a__ , a__ =os.path.split(__a )
a__ =output_file.replace('.' , '-' ) + '-extracted'
a__ =os.path.join(__a , __a )
if os.path.isdir(__a ) and os.listdir(__a ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
a__ =output_path + '.lock'
with FileLock(__a ):
shutil.rmtree(__a , ignore_errors=__a )
os.makedirs(__a )
if is_zipfile(__a ):
with ZipFile(__a , 'r' ) as zip_file:
zip_file.extractall(__a )
zip_file.close()
elif tarfile.is_tarfile(__a ):
a__ =tarfile.open(__a )
tar_file.extractall(__a )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(__a ) )
return output_path_extracted
return output_path
def _lowercase( __a : List[str] , __a : List[str]="," ):
assert isinstance(__a , __a )
if os.path.isfile(__a ):
with open(__a ) as f:
a__ =eval(f.read() )
else:
a__ =requests.get(__a )
try:
            a__ =req.json()
except Exception:
a__ =req.content.decode()
assert data is not None, "could not connect"
try:
a__ =eval(__a )
except Exception:
a__ =data.split('\n' )
req.close()
return data
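# Fetches an image over HTTP and decodes it into a numpy array.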
def _lowercase( __a : List[Any] ):
a__ =requests.get(__a )
a__ =np.array(Image.open(BytesIO(response.content ) ) )
return img
def _lowercase( __a : List[Any] ):
a__ =url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__a )
with open(__a , 'rb' ) as stream:
a__ =pkl.load(__a )
a__ =weights.pop('model' )
a__ ={}
for k, v in model.items():
a__ =torch.from_numpy(__a )
if "running_var" in k:
a__ =torch.tensor([0] )
a__ =k.replace('running_var' , 'num_batches_tracked' )
a__ =zero
return new
def _lowercase( ):
print(f"""{os.path.abspath(os.path.join(__a , os.pardir ) )}/demo.ipynb""" )
def _lowercase( __a : Dict , __a : Any="RGB" ):
assert isinstance(__a , __a )
if os.path.isfile(__a ):
a__ =cva.imread(__a )
else:
a__ =get_image_from_url(__a )
assert img is not None, f"""could not connect to: {im}"""
a__ =cva.cvtColor(__a , cva.COLOR_BGR2RGB )
if input_format == "RGB":
a__ =img[:, :, ::-1]
return img
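# Yields the image list in successive slices of the given batch size.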
def _lowercase( __a : Any , __a : str=1 ):
return (images[i : i + batch] for i in range(0 , len(__a ) , __a ))
| 20 |
from __future__ import annotations
from typing import Any
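# Borůvka's algorithm: repeatedly merge components along their minimum-weight outgoing edges.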
class lowercase_ :
def __init__( self , lowercase_) -> None:
a__ =num_of_nodes
a__ =[]
a__ ={}
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
self.m_edges.append([u_node, v_node, weight])
def __UpperCamelCase ( self , lowercase_) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def __UpperCamelCase ( self , lowercase_) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
a__ =self.find_component(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
if component_size[u_node] <= component_size[v_node]:
a__ =v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_)
elif component_size[u_node] >= component_size[v_node]:
a__ =self.find_component(lowercase_)
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_)
def __UpperCamelCase ( self) -> None:
a__ =[]
a__ =0
a__ =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
a__ =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
a__ =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_):
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_)
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
num_of_components -= 1
a__ =[-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ (unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=10 , lowercase_=[10, 20, 30, 40] , lowercase_=[1, 1, 2, 1] , lowercase_=True , lowercase_=True , lowercase_="relu" , lowercase_=3 , lowercase_=None , ) -> List[str]:
a__ =parent
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =embeddings_size
a__ =hidden_sizes
a__ =depths
a__ =is_training
a__ =use_labels
a__ =hidden_act
a__ =num_labels
a__ =scope
a__ =len(lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Any:
a__ =FlaxRegNetModel(config=lowercase_)
a__ =model(lowercase_)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =self.num_labels
a__ =FlaxRegNetForImageClassification(config=lowercase_)
a__ =model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCamelCase ( self) -> Dict:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> None:
a__ =FlaxRegNetModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> List[Any]:
return
def __UpperCamelCase ( self) -> Dict:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@unittest.skip(reason='RegNet does not use inputs_embeds')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def __UpperCamelCase ( self) -> Dict:
pass
def __UpperCamelCase ( self) -> Optional[int]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_):
a__ =model_class(lowercase_)
a__ =model(**self._prepare_for_class(lowercase_ , lowercase_))
a__ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__ =self.model_tester.num_stages
self.assertEqual(len(lowercase_) , expected_num_stages + 1)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ =True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model_class(lowercase_)
@jax.jit
def model_jitted(lowercase_ , **lowercase_):
return model(pixel_values=lowercase_ , **lowercase_)
with self.subTest('JIT Enabled'):
a__ =model_jitted(**lowercase_).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
a__ =model_jitted(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_))
for jitted_output, output in zip(lowercase_ , lowercase_):
self.assertEqual(jitted_output.shape , output.shape)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_flax
class lowercase_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self) -> Optional[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None
@slow
def __UpperCamelCase ( self) -> List[Any]:
a__ =FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(images=lowercase_ , return_tensors='np')
a__ =model(**lowercase_)
# verify the logits
a__ =(1, 1000)
self.assertEqual(outputs.logits.shape , lowercase_)
a__ =jnp.array([-0.41_80, -1.50_51, -3.48_36])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
| 20 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=True , lowercase_=False) -> Any:
if rouge_types is None:
a__ =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__ =rouge_scorer.RougeScorer(rouge_types=lowercase_ , use_stemmer=lowercase_)
if use_aggregator:
a__ =scoring.BootstrapAggregator()
else:
a__ =[]
for ref, pred in zip(lowercase_ , lowercase_):
a__ =scorer.score(lowercase_ , lowercase_)
if use_aggregator:
aggregator.add_scores(lowercase_)
else:
scores.append(lowercase_)
if use_aggregator:
a__ =aggregator.aggregate()
else:
a__ ={}
for key in scores[0]:
a__ =[score[key] for score in scores]
return result
| 20 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
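# Pipeline data helpers: a map-style dataset that applies a per-item
# preprocess function, plus iterators that unroll batched model outputs back
# into single items (the chunk-aware variants regroup items on `is_last`).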
class lowercase_ (lowercase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
a__ =dataset
a__ =process
a__ =params
def __len__( self) -> List[str]:
return len(self.dataset)
def __getitem__( self , lowercase_) -> str:
a__ =self.dataset[i]
a__ =self.process(lowercase_ , **self.params)
return processed
class lowercase_ (lowercase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> Optional[int]:
a__ =loader
a__ =infer
a__ =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
a__ =None
a__ =loader_batch_size
# Internal bookkeeping
a__ =None
a__ =None
def __len__( self) -> List[Any]:
return len(self.loader)
def __iter__( self) -> Dict:
a__ =iter(self.loader)
return self
def __UpperCamelCase ( self) -> Any:
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
a__ =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
a__ ={}
for k, element in self._loader_batch_data.items():
if isinstance(lowercase_ , lowercase_):
# Convert ModelOutput to tuple first
a__ =element.to_tuple()
if isinstance(element[0] , torch.Tensor):
a__ =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
a__ =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowercase_ , lowercase_):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
a__ =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
a__ =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
a__ =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
a__ =element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
a__ =np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
a__ =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
a__ =self._loader_batch_data.__class__(lowercase_)
self._loader_batch_index += 1
return result
def __UpperCamelCase ( self) -> List[Any]:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
a__ =next(self.iterator)
a__ =self.infer(lowercase_ , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowercase_ , torch.Tensor):
a__ =processed
else:
a__ =list(processed.keys())[0]
a__ =processed[key]
if isinstance(lowercase_ , lowercase_):
a__ =len(lowercase_)
else:
a__ =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
a__ =observed_batch_size
# Setting internal index to unwrap the batch
a__ =processed
a__ =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase_ (lowercase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> Optional[Any]:
super().__init__(lowercase_ , lowercase_ , lowercase_)
def __iter__( self) -> str:
a__ =iter(self.loader)
a__ =None
return self
def __UpperCamelCase ( self) -> Tuple:
if self.subiterator is None:
a__ =self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
a__ =next(self.subiterator)
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
a__ =self.infer(next(self.iterator) , **self.params)
a__ =next(self.subiterator)
return processed
class lowercase_ (lowercase__ ):
def __iter__( self) -> Dict:
a__ =iter(self.loader)
return self
def __UpperCamelCase ( self) -> str:
# Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item, which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here, in the original `process`
        # boundaries, so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
a__ =False
a__ =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
a__ =self.loader_batch_item()
a__ =item.pop('is_last')
accumulator.append(lowercase_)
if is_last:
return accumulator
while not is_last:
a__ =self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(lowercase_ , torch.Tensor):
a__ =processed
else:
a__ =list(processed.keys())[0]
a__ =processed[key]
if isinstance(lowercase_ , lowercase_):
a__ =len(lowercase_)
else:
a__ =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
a__ =observed_batch_size
a__ =processed
a__ =0
while self._loader_batch_index < self.loader_batch_size:
a__ =self.loader_batch_item()
a__ =item.pop('is_last')
accumulator.append(lowercase_)
if is_last:
return accumulator
else:
a__ =processed
a__ =item.pop('is_last')
accumulator.append(lowercase_)
return accumulator
class lowercase_ (lowercase__ ):
def __init__( self , lowercase_ , lowercase_) -> Union[str, Any]:
a__ =dataset
a__ =key
def __len__( self) -> Optional[Any]:
return len(self.dataset)
def __getitem__( self , lowercase_) -> Any:
return self.dataset[i][self.key]
class lowercase_ (lowercase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_) -> str:
a__ =dataset
a__ =keya
a__ =keya
def __len__( self) -> Any:
return len(self.dataset)
def __getitem__( self , lowercase_) -> List[str]:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 20 |
from __future__ import annotations
# End-of-word sentinel stored in the trie.
END: str = '#'

class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)

trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)

def autocomplete_using_trie(string) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)

def main() -> None:
    print(autocomplete_using_trie('de'))

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 20 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
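# Tests for the timm-backed backbone wrapper, including parity checks against
# the equivalent transformers backbones loaded via AutoBackbone.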
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 |
# Vigenère cipher: shift each letter by a repeating alphabetic key.
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')
    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    print(f"""\n{mode.title()}ed message:""")
    print(translated)

def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')

def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')

def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            # Advance the key, wrapping back to the start when exhausted.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Pass non-letters through unchanged.
            translated.append(symbol)
    return ''.join(translated)

if __name__ == "__main__":
    main()
| 20 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_lowerCAmelCase: Optional[int] = None
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase: List[Any] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCAmelCase: Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
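# Fast (tokenizers-backed) T5 tokenizer; saving the sentencepiece vocabulary
# requires the original `spiece.model` file to be available.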
class lowercase_ (lowercase__ ):
snake_case =VOCAB_FILES_NAMES
snake_case =PRETRAINED_VOCAB_FILES_MAP
snake_case =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case =['input_ids', 'attention_mask']
snake_case =TaTokenizer
snake_case =[]
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_=100 , lowercase_=None , **lowercase_ , ) -> List[str]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
a__ =[F"""<extra_id_{i}>""" for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
a__ =len(set(filter(lambda lowercase_: bool('extra_id_' in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens')
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
a__ =vocab_file
a__ =False if not self.vocab_file else True
a__ =extra_ids
@staticmethod
def __UpperCamelCase ( lowercase_ , lowercase_ , lowercase_) -> Dict:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
a__ =TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowercase_ , )
return max_model_length
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowercase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
a__ =os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_):
copyfile(self.vocab_file , lowercase_)
logger.info(F"""Copy vocab file to {out_vocab_file}""")
return (out_vocab_file,)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> List[int]:
a__ =token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
a__ =token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> List[int]:
a__ =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCamelCase ( self) -> Union[str, Any]:
return list(
set(filter(lambda lowercase_: bool(re.search(R'<extra_id_\d+>' , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCamelCase ( self) -> List[Any]:
return [self.convert_tokens_to_ids(lowercase_) for token in self.get_sentinel_tokens()]
| 20 |
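# Re-export hub gathering the package's utility submodules (constants,
# dataclasses, environment and import checks, modeling/offload helpers, ...).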
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 1 |
from ..utils import DummyObject, requires_backends
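# Placeholder object that raises an informative error at instantiation when
# the optional `keras_nlp` backend is not installed.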
class lowercase_ (metaclass=lowercase__ ):
snake_case =['keras_nlp']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['keras_nlp'])
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
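# Fast tests for the Kandinsky prior pipeline, assembled from tiny randomly
# initialized CLIP text/vision components and a small PriorTransformer.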
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCAmelCase: Optional[int] = logging.getLogger(__name__)
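# Fine-tunes an OpenAI GPT double-heads model on a ROCStories-style corpus:
# each story comes with two candidate continuations, and the multiple-choice
# head learns to score the correct one.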
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__a , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=__a , type=__a , required=__a , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=__a , default='' )
parser.add_argument('--eval_dataset' , type=__a , default='' )
parser.add_argument('--seed' , type=__a , default=42 )
parser.add_argument('--num_train_epochs' , type=__a , default=3 )
parser.add_argument('--train_batch_size' , type=__a , default=8 )
parser.add_argument('--eval_batch_size' , type=__a , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=__a , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=__a , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=__a , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=__a , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=__a , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=__a , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=__a , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=__a , default=0.01 )
parser.add_argument('--lm_coef' , type=__a , default=0.9 )
parser.add_argument('--n_valid' , type=__a , default=374 )
parser.add_argument('--server_ip' , type=__a , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__a , default='' , help='Can be used for distant debugging.' )
a__ =parser.parse_args()
print(__a )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
a__ =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
a__ =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(__a , __a ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
a__ =['_start_', '_delimiter_', '_classify_']
a__ =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
a__ =tokenizer.convert_tokens_to_ids(__a )
a__ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('Encoding dataset...' )
a__ =load_rocstories_dataset(args.train_dataset )
a__ =load_rocstories_dataset(args.eval_dataset )
a__ =(train_dataset, eval_dataset)
a__ =tokenize_and_encode(__a )
# Compute the max input length for the Transformer
a__ =model.config.n_positions // 2 - 2
a__ =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
a__ =min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
a__ =pre_process_datasets(__a , __a , __a , *__a )
a__ , a__ =tensor_datasets[0], tensor_datasets[1]
a__ =TensorDataset(*__a )
a__ =RandomSampler(__a )
a__ =DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
a__ =TensorDataset(*__a )
a__ =SequentialSampler(__a )
a__ =DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
a__ =args.max_steps
a__ =args.max_steps // (len(__a ) // args.gradient_accumulation_steps) + 1
else:
a__ =len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs
a__ =list(model.named_parameters() )
a__ =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
a__ =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
a__ =AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon )
a__ =get_linear_schedule_with_warmup(
__a , num_warmup_steps=args.warmup_steps , num_training_steps=__a )
if args.do_train:
a__ , a__ , a__ =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
a__ =0
a__ =0
a__ =tqdm(__a , desc='Training' )
for step, batch in enumerate(__a ):
a__ =tuple(t.to(__a ) for t in batch )
a__ , a__ , a__ , a__ =batch
a__ =model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
a__ =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
a__ =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
a__ ='Training loss: {:.2e} lr: {:.2e}'.format(__a , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
a__ =model.module if hasattr(__a , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
a__ =os.path.join(args.output_dir , __a )
a__ =os.path.join(args.output_dir , __a )
torch.save(model_to_save.state_dict() , __a )
model_to_save.config.to_json_file(__a )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
a__ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
a__ =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__a )
if args.do_eval:
model.eval()
a__ , a__ =0, 0
a__ , a__ =0, 0
for batch in tqdm(__a , desc='Evaluating' ):
a__ =tuple(t.to(__a ) for t in batch )
a__ , a__ , a__ , a__ =batch
with torch.no_grad():
a__ , a__ , a__ , a__ =model(
__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
a__ =mc_logits.detach().cpu().numpy()
a__ =mc_labels.to('cpu' ).numpy()
a__ =accuracy(__a , __a )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
a__ =eval_loss / nb_eval_steps
a__ =eval_accuracy / nb_eval_examples
a__ =tr_loss / nb_tr_steps if args.do_train else None
a__ ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
a__ =os.path.join(args.output_dir , 'eval_results.txt' )
with open(__a , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __a , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 20 |
from manim import *
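# Manim scene animating a sharded checkpoint load: the weights of a single
# shard are moved from the loaded checkpoint into CPU memory next to an
# empty model.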
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 1 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
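# Implements the `env` CLI command, which collects version and platform
# information for copy-pasting into GitHub issues.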
def _lowercase( __a : Optional[Any] ):
return EnvironmentCommand()
class lowercase_ (lowercase__ ):
@staticmethod
def __UpperCamelCase ( lowercase_) -> Optional[int]:
a__ =parser.add_parser('env')
download_parser.set_defaults(func=lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =huggingface_hub.__version__
a__ ='not installed'
a__ ='NA'
if is_torch_available():
import torch
a__ =torch.__version__
a__ =torch.cuda.is_available()
a__ ='not installed'
if is_transformers_available():
import transformers
a__ =transformers.__version__
a__ ='not installed'
if is_accelerate_available():
import accelerate
a__ =accelerate.__version__
a__ ='not installed'
if is_xformers_available():
import xformers
a__ =xformers.__version__
a__ ={
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
print(self.format_dict(lowercase_))
return info
@staticmethod
def __UpperCamelCase ( lowercase_) -> List[Any]:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 20 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase: Any = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None):
    return field(default_factory=lambda: default , metadata=metadata )
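# Fixture dataclasses below cover the field shapes HfArgumentParser must
# handle: required fields, defaults, booleans, enums, optionals and lists.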
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
            # Choices with mixed type have a custom function as "type",
            # so we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
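        # e.g. for a `baz: bool = True` field the generated parser accepts `--baz`,
        # `--no_baz`, and an explicit `--baz False` (parsed via `string_to_bool`,
        # imported at the top of this file); the assertions below exercise all three.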
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
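# Hedged end-to-end sketch of the API exercised above: a dataclass's fields become
# CLI flags and `parse_args_into_dataclasses` returns populated instances. The demo
# dataclass is ours, not part of the test suite.
@dataclass
class _SketchArgs:
    foo: int = 1
    baz: str = field(default='toto', metadata={'help': 'help message'})

(_sketch,) = HfArgumentParser(_SketchArgs).parse_args_into_dataclasses(
    ['--foo', '2'], look_for_args_file=False)
assert _sketch.foo == 2 and _sketch.baz == 'toto'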
| 20 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
a__ =-(labels.shape[-1] * loss.item())
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
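# Hedged note on the score above: the model returns the mean cross-entropy per label
# token, so multiplying by the label length and negating recovers the summed
# log-likelihood that the reference (Mesh TensorFlow) evaluation reports.
# Tiny arithmetic check with illustrative numbers:
assert -(4 * 2.5) == -10.0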
| 20 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
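# Hedged arithmetic sketch of the encoder input size derived above:
# feature_size = input_size * len(lags_sequence) + _number_of_features, where
# _number_of_features sums embedding dims, dynamic/static real features, time
# features, and the two scaling statistics. Illustrative default-like numbers:
_input_size, _lags, _num_time_features = 1, [1, 2, 3, 4, 5, 6, 7], 1
_number_of_features = 0 + 0 + _num_time_features + 0 + _input_size * 2
assert _input_size * len(_lags) + _number_of_features == 10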
| 20 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowercase_ (lowercase__ ):
def __init__( self , lowercase_ , lowercase_) -> Tuple:
a__ =params
a__ =np.array(lowercase_)
a__ =np.array([len(lowercase_) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , lowercase_) -> List[str]:
return (self.token_ids[index], self.lengths[index])
def __len__( self) -> Any:
return len(self.lengths)
def __UpperCamelCase ( self) -> Any:
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.params.max_model_input_size
a__ =self.lengths > max_len
logger.info(F"""Splitting {sum(lowercase_)} too long sequences.""")
def divide_chunks(lowercase_ , lowercase_):
return [l[i : i + n] for i in range(0 , len(lowercase_) , lowercase_)]
a__ =[]
a__ =[]
if self.params.mlm:
a__ , a__ =self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
a__ , a__ =self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_)
new_lengths.append(len_)
else:
a__ =[]
for sub_s in divide_chunks(seq_ , max_len - 2):
if sub_s[0] != cls_id:
a__ =np.insert(lowercase_ , 0 , lowercase_)
if sub_s[-1] != sep_id:
a__ =np.insert(lowercase_ , len(lowercase_) , lowercase_)
assert len(lowercase_) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowercase_)
new_tok_ids.extend(lowercase_)
new_lengths.extend([len(lowercase_) for l in sub_seqs])
a__ =np.array(lowercase_)
a__ =np.array(lowercase_)
def __UpperCamelCase ( self) -> Tuple:
a__ =len(self)
a__ =self.lengths > 11
a__ =self.token_ids[indices]
a__ =self.lengths[indices]
a__ =len(self)
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""")
def __UpperCamelCase ( self) -> Any:
if "unk_token" not in self.params.special_tok_ids:
return
else:
a__ =self.params.special_tok_ids['unk_token']
a__ =len(self)
a__ =np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
a__ =(unk_occs / self.lengths) < 0.5
a__ =self.token_ids[indices]
a__ =self.lengths[indices]
a__ =len(self)
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""")
def __UpperCamelCase ( self) -> List[str]:
if not self.params.is_master:
return
logger.info(F"""{len(self)} sequences""")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __UpperCamelCase ( self , lowercase_) -> str:
a__ =[t[0] for t in batch]
a__ =[t[1] for t in batch]
assert len(lowercase_) == len(lowercase_)
# Max for paddings
a__ =max(lowercase_)
# Pad token ids
if self.params.mlm:
a__ =self.params.special_tok_ids['pad_token']
else:
a__ =self.params.special_tok_ids['unk_token']
a__ =[list(t.astype(lowercase_)) + [pad_idx] * (max_seq_len_ - len(lowercase_)) for t in token_ids]
assert len(tk_) == len(lowercase_)
assert all(len(lowercase_) == max_seq_len_ for t in tk_)
a__ =torch.tensor(tk_) # (bs, max_seq_len_)
a__ =torch.tensor(lowercase_) # (bs)
return tk_t, lg_t
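# Hedged standalone sketch of the right-padding strategy in the collate method above:
# every sequence in a batch is padded with the pad id up to the batch maximum length.
_batch = [np.array([5, 6, 7]), np.array([5, 6])]
_pad_idx = 0
_max_len = max(len(t) for t in _batch)
_padded = [list(t) + [_pad_idx] * (_max_len - len(t)) for t in _batch]
assert _padded == [[5, 6, 7], [5, 6, 0]]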
| 20 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
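# Hedged standalone sketch of the post-processing above: per-image logits of shape
# (num_labels, h, w) are bilinearly upsampled to the target size, then argmax'd over
# the label axis to produce a per-pixel segmentation map. Shapes are illustrative.
if is_torch_available():
    _logits = torch.randn(1, 3, 4, 4)  # (batch, num_labels, h, w)
    _resized = torch.nn.functional.interpolate(_logits, size=(8, 8), mode='bilinear', align_corners=False)
    _seg_map = _resized[0].argmax(dim=0)  # (8, 8) tensor of label ids
    assert _seg_map.shape == (8, 8)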
| 20 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
        # and when patching "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
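# Hedged standalone illustration of the same idea using only the standard library:
# `unittest.mock.patch` likewise swaps an attribute for the duration of a context
# manager and restores the original on exit.
import os.path
from unittest.mock import patch

with patch('os.path.join', lambda *parts: '/'.join(parts)):
    assert os.path.join('a', 'b') == 'a/b'
# outside the context, the real os.path.join is restored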
| 20 | 1 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=14 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=4 , lowercase_=4 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=0.02 , ) -> int:
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_mask
a__ =use_token_type_ids
a__ =use_labels
a__ =vocab_size
a__ =hidden_size
a__ =rotary_dim
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =initializer_range
a__ =None
a__ =vocab_size - 1
a__ =vocab_size - 1
a__ =vocab_size - 1
def __UpperCamelCase ( self) -> List[Any]:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ =None
if self.use_input_mask:
a__ =random_attention_mask([self.batch_size, self.seq_length])
a__ =GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ , a__ =config_and_inputs
a__ ={'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =20
a__ =model_class_name(lowercase_)
a__ =model.init_cache(input_ids.shape[0] , lowercase_)
a__ =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4')
a__ =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
a__ =model(
input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , )
a__ =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4')
a__ =model(
input_ids[:, -1:] , attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowercase_ , )
a__ =model(lowercase_)
a__ =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""")
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
a__ =20
a__ =model_class_name(lowercase_)
a__ =jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
a__ =model.init_cache(input_ids.shape[0] , lowercase_)
a__ =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
a__ =model(
input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , )
a__ =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4')
a__ =model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowercase_ , position_ids=lowercase_ , )
a__ =model(lowercase_ , attention_mask=lowercase_)
a__ =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""")
@require_flax
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case =(FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __UpperCamelCase ( self) -> List[Any]:
a__ =FlaxGPTJModelTester(self)
def __UpperCamelCase ( self) -> Tuple:
for model_class_name in self.all_model_classes:
a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
for model_class_name in self.all_model_classes:
a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
@tooslow
def __UpperCamelCase ( self) -> Any:
a__ =GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left')
a__ =tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowercase_ , truncation=lowercase_)
a__ =FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B')
a__ =False
a__ =model.config.eos_token_id
a__ =jax.jit(model.generate)
a__ =jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id).sequences
a__ =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_)
a__ =[
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowercase_ , lowercase_)
@is_pt_flax_cross_test
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ ={k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
a__ =model_class.__name__[4:] # Skip the "Flax" at the beginning
a__ =getattr(lowercase_ , lowercase_)
a__ , a__ =pt_inputs['input_ids'].shape
a__ =np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(lowercase_):
a__ =0
a__ =1
a__ =0
a__ =1
a__ =pt_model_class(lowercase_).eval()
a__ =model_class(lowercase_ , dtype=jnp.floataa)
a__ =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_)
a__ =fx_state
with torch.no_grad():
a__ =pt_model(**lowercase_).to_tuple()
a__ =fx_model(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(lowercase_ , lowercase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_)
a__ =model_class.from_pretrained(lowercase_ , from_pt=lowercase_)
a__ =fx_model_loaded(**lowercase_).to_tuple()
self.assertEqual(
len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(lowercase_ , lowercase_):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@is_pt_flax_cross_test
def __UpperCamelCase ( self) -> Dict:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ ={k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
a__ =model_class.__name__[4:] # Skip the "Flax" at the beginning
a__ =getattr(lowercase_ , lowercase_)
a__ =pt_model_class(lowercase_).eval()
a__ =model_class(lowercase_ , dtype=jnp.floataa)
a__ =load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params)
a__ , a__ =pt_inputs['input_ids'].shape
a__ =np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(lowercase_):
a__ =0
a__ =1
a__ =0
a__ =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
a__ =pt_model(**lowercase_).to_tuple()
a__ =fx_model(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(lowercase_ , lowercase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_)
a__ =pt_model_class.from_pretrained(lowercase_ , from_flax=lowercase_)
with torch.no_grad():
a__ =pt_model_loaded(**lowercase_).to_tuple()
self.assertEqual(
len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(lowercase_ , lowercase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@tooslow
def __UpperCamelCase ( self) -> Tuple:
for model_class_name in self.all_model_classes:
a__ =model_class_name.from_pretrained('EleutherAI/gpt-j-6B')
a__ =model(np.ones((1, 1)))
self.assertIsNotNone(lowercase_)
| 20 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowercase( __a : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
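# Hedged invocation sketch (script name and paths are placeholders, not verified files):
#
#   python convert_original_controlnet_to_diffusers.py \
#     --checkpoint_path ./control_sd15_canny.pth \
#     --original_config_file ./cldm_v15.yaml \
#     --dump_path ./controlnet-canny-diffusers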
| 20 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =DebertaTokenizer
snake_case =True
snake_case =DebertaTokenizerFast
def __UpperCamelCase ( self) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a__ =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
a__ =dict(zip(lowercase_ , range(len(lowercase_))))
a__ =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a__ ={'unk_token': '[UNK]'}
a__ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a__ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowercase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(lowercase_))
def __UpperCamelCase ( self , **lowercase_) -> int:
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_)
def __UpperCamelCase ( self , lowercase_) -> Optional[int]:
a__ ='lower newer'
a__ ='lower newer'
return input_text, output_text
def __UpperCamelCase ( self) -> str:
a__ =self.get_tokenizer()
a__ ='lower newer'
a__ =['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
a__ =tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
a__ =tokens + [tokenizer.unk_token]
a__ =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_tokenizer()
a__ =tokenizer('Hello' , 'World')
a__ =[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , lowercase_)
@slow
def __UpperCamelCase ( self) -> int:
a__ =self.tokenizer_class.from_pretrained('microsoft/deberta-base')
a__ =tokenizer.encode('sequence builders' , add_special_tokens=lowercase_)
a__ =tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase_)
a__ =tokenizer.encode(
'sequence builders' , add_special_tokens=lowercase_ , add_prefix_space=lowercase_)
a__ =tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowercase_ , add_prefix_space=lowercase_)
a__ =tokenizer.build_inputs_with_special_tokens(lowercase_)
a__ =tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __UpperCamelCase ( self) -> str:
a__ =[self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
a__ =tokenizer_class.from_pretrained('microsoft/deberta-base')
a__ =[
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
a__ =tokenizer(lowercase_ , padding=lowercase_)
a__ =[tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_) for seq in encoding['input_ids']]
# fmt: off
a__ ={
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
a__ =[
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , lowercase_)
for expected, decoded in zip(lowercase_ , lowercase_):
self.assertEqual(lowercase_ , lowercase_)
| 20 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase: Tuple = get_logger(__name__)
_lowerCAmelCase: List[str] = Path(__file__).parent / 'model_card_template.md'
_lowerCAmelCase: Any = uuida().hex
_lowerCAmelCase: List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowercase( __a : Union[Dict, str, None] = None ):
a__ =f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
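# Hedged example of the user-agent string assembled above (version numbers illustrative):
#
#   "diffusers/0.19.0; python/3.10.12; session_id/<hex>; torch/2.0.1; is_ci/true"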
def _lowercase( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ):
if token is None:
a__ =HfFolder.get_token()
if organization is None:
a__ =whoami(__a )['name']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(__a , 'adam_beta1' ) else None , adam_beta2=args.adam_beta2 if hasattr(__a , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def _lowercase( __a : Optional[str] , __a : Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
a__ =str(Path(__a ).as_posix() )
a__ =re.search(r'snapshots/([^/]+)/' , __a )
if search is None:
return None
a__ =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase: List[str] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_lowerCAmelCase: List[str] = os.path.join(hf_cache_home, 'diffusers')
def _lowercase( __a : Optional[str] = None , __a : Optional[str] = None ):
if new_cache_dir is None:
a__ =DIFFUSERS_CACHE
if old_cache_dir is None:
a__ =old_diffusers_cache
a__ =Path(__a ).expanduser()
a__ =Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a__ =new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase: Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase: int = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase: List[Any] = int(f.read())
except ValueError:
_lowerCAmelCase: Any = 0
if cache_version < 1:
_lowerCAmelCase: str = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase: Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _lowercase( __a : str , __a : Optional[str] = None ):
if variant is not None:
a__ =weights_name.split('.' )
a__ =splits[:-1] + [variant] + splits[-1:]
a__ ='.'.join(__a )
return weights_name
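# Hedged standalone check of the variant splicing above: the variant is inserted
# just before the final extension, e.g. "model.bin" + "fp16" -> "model.fp16.bin".
_splits = 'diffusion_pytorch_model.bin'.split('.')
assert '.'.join(_splits[:-1] + ['fp16'] + _splits[-1:]) == 'diffusion_pytorch_model.fp16.bin'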
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 1 |
import sys
from collections import defaultdict
class lowercase_ :
def __init__( self) -> List[Any]:
a__ =[]
def __UpperCamelCase ( self , lowercase_) -> List[str]:
return self.node_position[vertex]
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Any:
a__ =pos
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
a__ =2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
a__ =2 * start + 1
else:
a__ =2 * start + 2
if heap[smallest_child] < heap[start]:
a__ , a__ =heap[smallest_child], positions[smallest_child]
a__ , a__ =(
heap[start],
positions[start],
)
a__ , a__ =temp, tempa
a__ =self.get_position(positions[smallest_child])
self.set_position(
positions[smallest_child] , self.get_position(positions[start]))
self.set_position(positions[start] , lowercase_)
self.top_to_bottom(lowercase_ , lowercase_ , lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
a__ =position[index]
while index != 0:
a__ =int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
if val < heap[parent]:
a__ =heap[parent]
a__ =position[parent]
self.set_position(position[parent] , lowercase_)
else:
a__ =val
a__ =temp
self.set_position(lowercase_ , lowercase_)
break
a__ =parent
else:
a__ =val
a__ =temp
self.set_position(lowercase_ , 0)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =len(lowercase_) // 2 - 1
for i in range(lowercase_ , -1 , -1):
self.top_to_bottom(lowercase_ , lowercase_ , len(lowercase_) , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Optional[Any]:
a__ =positions[0]
a__ =sys.maxsize
self.top_to_bottom(lowercase_ , 0 , len(lowercase_) , lowercase_)
return temp
def _lowercase( __a : Any ):
a__ =Heap()
a__ =[0] * len(__a )
a__ =[-1] * len(__a ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
a__ =[] # Heap of Distance of vertices from their neighboring vertex
a__ =[]
for vertex in range(len(__a ) ):
distance_tv.append(sys.maxsize )
positions.append(__a )
heap.node_position.append(__a )
a__ =[]
a__ =1
a__ =sys.maxsize
for neighbor, distance in adjacency_list[0]:
a__ =0
a__ =distance
heap.heapify(__a , __a )
for _ in range(1 , len(__a ) ):
a__ =heap.delete_minimum(__a , __a )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
a__ =1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__a )]
):
a__ =distance
heap.bottom_to_top(
__a , heap.get_position(__a ) , __a , __a )
a__ =vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase: str = int(input('Enter number of edges: ').strip())
_lowerCAmelCase: Optional[int] = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase: str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
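# Self-contained cross-check of the routine above: a minimal heapq-based Prim's
# on a triangle graph 0-1 (w=1), 1-2 (w=2), 0-2 (w=4); the MST weight is 3.
import heapq as _heapq

def _demo_prim_weight(adj: dict) -> int:
    start = next(iter(adj))
    seen, total = {start}, 0
    frontier = [(w, v) for v, w in adj[start]]
    _heapq.heapify(frontier)
    while frontier:
        w, v = _heapq.heappop(frontier)
        if v in seen:
            continue
        seen.add(v)
        total += w
        for nxt, nw in adj[v]:
            if nxt not in seen:
                _heapq.heappush(frontier, (nw, nxt))
    return total

assert _demo_prim_weight({0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2)], 2: [(0, 4), (1, 2)]}) == 3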
| 20 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def _lowercase( __a : int , __a : str , __a : LevitConfig , __a : Path , __a : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _lowercase( __a : Path , __a : str = None , __a : bool = True ):
a__ ='imagenet-1k-id2label.json'
a__ =1000
a__ =(1, num_labels)
a__ ='huggingface/label-files'
a__ =num_labels
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
    a__ ={int(k ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
a__ =partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
a__ ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
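# Hedged CLI sketch (flag names as declared above; the script filename is a
# placeholder): convert one checkpoint, or every entry of `names_to_config`
# when --model_name is omitted.
#   python convert_levit_to_hf.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub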
| 20 | 1 |
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase: Dict = list[list[float | int]]
def _lowercase( __a : Matrix , __a : Matrix ):
a__ =len(__a )
a__ =[[0 for _ in range(size + 1 )] for _ in range(__a )]
a__ =42
a__ =42
a__ =42
a__ =42
a__ =42
a__ =42
for row in range(__a ):
for col in range(__a ):
a__ =matrix[row][col]
a__ =vector[row][0]
a__ =0
a__ =0
while row < size and col < size:
# pivoting
a__ =max((abs(augmented[rowa][col] ), rowa) for rowa in range(__a , __a ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
a__ , a__ =augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __a ):
a__ =augmented[rowa][col] / augmented[row][col]
a__ =0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __a ):
for row in range(__a ):
a__ =augmented[row][col] / augmented[col][col]
for cola in range(__a , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__a )
]
def _lowercase( __a : list[int] ):
a__ =len(__a )
a__ =[[0 for _ in range(__a )] for _ in range(__a )]
a__ =[[0] for _ in range(__a )]
a__ =42
a__ =42
a__ =42
a__ =42
for x_val, y_val in enumerate(__a ):
for col in range(__a ):
a__ =(x_val + 1) ** (size - col - 1)
a__ =y_val
a__ =solve(__a , __a )
def interpolated_func(__a : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__a ) )
return interpolated_func
def _lowercase( __a : int ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def _lowercase( __a : Callable[[int], int] = question_function , __a : int = 10 ):
a__ =[func(__a ) for x_val in range(1 , order + 1 )]
a__ =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
a__ =0
a__ =42
a__ =42
for poly in polynomials:
a__ =1
while func(__a ) == poly(__a ):
x_val += 1
ret += poly(__a )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
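# Self-contained cross-check of the FIT idea above, using exact Lagrange
# interpolation over Fractions (avoids float round-off): for u(n) = n**3 the
# sum of first incorrect terms is 1 + 15 + 58 = 74, the worked example in
# Project Euler 101.
from fractions import Fraction

def _demo_lagrange(points, x):
    total = Fraction(0)
    for i, (xi, yi) in enumerate(points):
        term = Fraction(yi)
        for xj, _ in points[:i] + points[i + 1:]:
            term *= Fraction(x - xj, xi - xj)
        total += term
    return total

_demo_u = lambda n: n ** 3
_demo_fits = 0
for _k in range(1, 4):
    _pts = [(n, _demo_u(n)) for n in range(1, _k + 1)]
    _n = _k + 1
    while _demo_lagrange(_pts, _n) == _demo_u(_n):
        _n += 1
    _demo_fits += _demo_lagrange(_pts, _n)
assert _demo_fits == 74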
| 20 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowerCAmelCase: int = logging.get_logger(__name__)
_lowerCAmelCase: Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_lowerCAmelCase: Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase( __a : Optional[Any] ):
a__ ={}
with open(__a , 'r' ) as file:
for line_number, line in enumerate(__a ):
a__ =line.strip()
if line:
a__ =line.split()
a__ =line_number
a__ =words[0]
a__ =value
return result
def _lowercase( __a : Dict , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : str ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowercase( __a : Optional[int] , __a : int , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] ):
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ ='.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ ='.'.join([key, hf_param_name] )
else:
a__ =key
a__ =value if 'lm_head' in full_key else value[0]
_lowerCAmelCase: Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase( __a : Dict , __a : int , __a : int=None , __a : List[str]=None ):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
    return is_used
def _lowercase( __a : Union[str, Any] , __a : List[str] , __a : Dict ):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
a__ =load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _lowercase( __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] ):
a__ =full_name.split('conv_layers.' )[-1]
a__ =name.split('.' )
a__ =int(items[0] )
a__ =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def _lowercase( __a : str , __a : str , __a : Any=None , __a : str=None , __a : Any=True , __a : Union[str, Any]=False ):
if config_path is not None:
a__ =WavaVecaConfig.from_pretrained(__a )
else:
a__ =WavaVecaConfig()
if is_seq_class:
a__ =read_txt_into_dict(__a )
a__ =idalabel
a__ =WavaVecaForSequenceClassification(__a )
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaForCTC(__a )
else:
a__ =WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_lowerCAmelCase: Tuple = parser.parse_args()
_lowerCAmelCase: Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
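# Hedged CLI sketch (flags as declared above; paths are placeholders):
#   python convert_wav2vec2.py --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned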
| 20 | 1 |
def _lowercase( __a : int ):
return number & 1 == 0
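# Quick self-contained check of the bitmask trick above: the lowest bit of an
# even integer is 0 (two's complement keeps this true for negatives as well).
assert all((n & 1 == 0) == (n % 2 == 0) for n in range(-4, 5))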
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
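        # loss is the token-averaged cross-entropy; scaling by the label length below
        # recovers (when no label is padding, as here) the summed log-likelihood that
        # the reference mesh-tensorflow score reports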
a__ =-(labels.shape[-1] * loss.item())
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 1 |
import argparse
from collections import defaultdict
def _lowercase( __a : Union[str, Any] , __a : Dict , __a : Union[str, Any] , __a : Optional[int] , __a : Optional[int] ):
a__ =f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__a , 'r' ) as f:
a__ =f.readlines()
a__ =f"""class {class_name}("""
a__ =f"""{4 * ' '}def {test_name}("""
a__ =f"""{8 * ' '}{correct_line.split()[0]}"""
a__ =f"""{16 * ' '}{correct_line.split()[0]}"""
a__ =False
a__ =False
a__ =False
a__ =False
a__ =0
a__ =0
a__ =[]
for line in lines:
if line.startswith(__a ):
a__ =True
elif in_class and line.startswith(__a ):
a__ =True
elif in_class and in_func and (line.startswith(__a ) or line.startswith(__a )):
a__ =len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
a__ =True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
a__ =True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * ' '}{correct_line}""" )
a__ =a__ =a__ =a__ =False
else:
new_lines.append(__a )
with open(__a , 'w' ) as f:
for line in new_lines:
f.write(__a )
def _lowercase( __a : int , __a : Union[str, Any]=None ):
if fail is not None:
with open(__a , 'r' ) as f:
a__ ={l.strip() for l in f.readlines()}
else:
a__ =None
with open(__a , 'r' ) as f:
a__ =f.readlines()
a__ =defaultdict(__a )
for line in correct_lines:
a__ , a__ , a__ , a__ =line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__a , __a , __a , __a , __a )
if __name__ == "__main__":
_lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
_lowerCAmelCase: int = parser.parse_args()
main(args.correct_filename, args.fail_filename)
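# Hedged usage sketch. From the parsing above, --correct_filename holds one
# 'file;class_name;test_name;correct_line' record per line, and the optional
# --fail_filename lists 'file::class::test' failures that restrict the rewrite.
#   python fix_expected_values.py --correct_filename expected.txt --fail_filename failures.txt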
| 20 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> int:
a__ =tempfile.mkdtemp()
a__ =BlipImageProcessor()
a__ =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
a__ =BlipProcessor(lowercase_ , lowercase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer
def __UpperCamelCase ( self , **lowercase_) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor
def __UpperCamelCase ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> str:
a__ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_lowerCAmelCase: Any = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = {'vocab_file': 'spiece.model'}
_lowerCAmelCase: List[Any] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCAmelCase: str = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
_lowerCAmelCase: Optional[Any] = '▁'
class lowercase_ (lowercase__ ):
snake_case =VOCAB_FILES_NAMES
snake_case =PRETRAINED_VOCAB_FILES_MAP
snake_case =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case =['input_ids', 'attention_mask']
def __init__( self , lowercase_ , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_=100 , lowercase_=None , lowercase_ = None , lowercase_=True , **lowercase_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
a__ =[F"""<extra_id_{i}>""" for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
a__ =len(set(filter(lambda lowercase_: bool('extra_id' in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens')
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565')
a__ =legacy
a__ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
a__ =vocab_file
a__ =extra_ids
a__ =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCamelCase ( lowercase_ , lowercase_ , lowercase_) -> Any:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
a__ =TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowercase_ , )
return max_model_length
@property
def __UpperCamelCase ( self) -> int:
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCamelCase ( self) -> Optional[Any]:
a__ ={self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCamelCase ( self) -> Union[str, Any]:
return list(
set(filter(lambda lowercase_: bool(re.search(R'<extra_id_\d+>' , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCamelCase ( self) -> List[Any]:
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCamelCase ( self , lowercase_) -> List[int]:
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
' eos tokens being added.')
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> List[int]:
a__ =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> List[int]:
a__ =self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
a__ =self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self) -> int:
a__ =self.__dict__.copy()
a__ =None
return state
def __setstate__( self , lowercase_) -> str:
a__ =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a__ ={}
a__ =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCamelCase ( self , lowercase_ , **lowercase_) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
a__ =SPIECE_UNDERLINE + text.replace(lowercase_ , ' ')
return super().tokenize(lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , **lowercase_) -> Tuple:
if not self.legacy:
a__ =text.startswith(lowercase_)
if is_first:
a__ =text[1:]
a__ =self.sp_model.encode(lowercase_ , out_type=lowercase_)
if not self.legacy and not is_first and not text.startswith(' ') and tokens[0].startswith(lowercase_):
a__ =([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def __UpperCamelCase ( self , lowercase_) -> Dict:
if token.startswith('<extra_id_'):
a__ =re.match(R'<extra_id_(\d+)>' , lowercase_)
a__ =int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_)
def __UpperCamelCase ( self , lowercase_) -> Dict:
if index < self.sp_model.get_piece_size():
a__ =self.sp_model.IdToPiece(lowercase_)
else:
a__ =F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def __UpperCamelCase ( self , lowercase_) -> Optional[Any]:
a__ =[]
a__ =''
a__ =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
a__ =True
a__ =[]
else:
current_sub_tokens.append(lowercase_)
a__ =False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> Tuple[str]:
if not os.path.isdir(lowercase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
a__ =os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , 'wb') as fi:
a__ =self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
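# Self-contained check that the two sentinel mappings above are mutual inverses:
# token '<extra_id_{num}>' -> id = vocab_size - num - 1, and id -> token via
# vocab_size - 1 - index. _V below is a hypothetical total vocab size.
_V = 32100  # e.g. 32000 SentencePiece pieces + 100 extra ids
for _num in range(100):
    _idx = _V - _num - 1
    assert f"<extra_id_{_V - 1 - _idx}>" == f"<extra_id_{_num}>"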
| 20 |
def _lowercase( __a : list[int] ):
a__ =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
a__ , a__ =numbers[j], numbers[i]
return numbers
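# Hedged cross-check of the pairwise-swap sort above (the a__/__a placeholders
# stand in for the locals used in the expressions); an unmangled reference:
def _demo_exchange_sort(numbers: list) -> list:
    for i in range(len(numbers)):
        for j in range(i + 1, len(numbers)):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers

assert _demo_exchange_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]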
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
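# Descriptive note on the _LazyModule pattern above: names listed in
# _import_structure are resolved only on first attribute access, so importing
# the package stays cheap even when the torch-backed models are available.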
| 20 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 1 |
from __future__ import annotations
import queue
class lowercase_ :
def __init__( self , lowercase_) -> Dict:
a__ =data
a__ =None
a__ =None
def _lowercase( ):
print('\n********Press N to stop entering at any point of time********\n' )
a__ =input('Enter the value of the root node: ' ).strip().lower()
a__ =queue.Queue()
a__ =TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
a__ =q.get()
a__ =f"""Enter the left node of {node_found.data}: """
a__ =input(__a ).strip().lower() or 'n'
if check == "n":
return tree_node
a__ =TreeNode(int(__a ) )
a__ =left_node
q.put(__a )
a__ =f"""Enter the right node of {node_found.data}: """
a__ =input(__a ).strip().lower() or 'n'
if check == "n":
return tree_node
a__ =TreeNode(int(__a ) )
a__ =right_node
q.put(__a )
raise
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =queue.Queue()
q.put(__a )
while not q.empty():
a__ =q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =queue.Queue()
q.put(__a )
while not q.empty():
a__ =[]
while not q.empty():
a__ =q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =[]
a__ =node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(__a )
a__ =n.left
# end of while means current node doesn't have left child
a__ =stack.pop()
# start to traverse its right child
a__ =n.right
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =[]
a__ =node
while n or stack:
while n:
stack.append(__a )
a__ =n.left
a__ =stack.pop()
print(n.data , end=',' )
a__ =n.right
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ , a__ =[], []
a__ =node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
a__ =stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def _lowercase( __a : str = "" , __a : Optional[Any]=50 , __a : str="*" ):
if not s:
return "\n" + width * char
a__ , a__ =divmod(width - len(__a ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
_lowerCAmelCase: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
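# Self-contained sketch of the stack-based in-order walk above, restated with
# (left, value, right) tuples so it runs independently of the TreeNode class:
def _demo_in_order(tree):
    stack, node, out = [], tree, []
    while node or stack:
        while node:  # descend to the leftmost unvisited node
            stack.append(node)
            node = node[0]
        node = stack.pop()
        out.append(node[1])  # visit, then continue in the right subtree
        node = node[2]
    return out

assert _demo_in_order(((None, 2, None), 1, (None, 3, None))) == [2, 1, 3]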
| 20 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 1 |
def _lowercase( __a : list ):
if not isinstance(__a , __a ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(__a ) == 0:
raise ValueError('Input list must be a non empty list' )
if len(__a ) == 1:
return True
a__ =series[1] - series[0]
for index in range(len(__a ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _lowercase( __a : list ):
if not isinstance(__a , __a ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(__a ) == 0:
raise ValueError('Input list must be a non empty list' )
a__ =0
for val in series:
answer += val
return answer / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
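# Self-contained checks mirroring the two helpers above: an arithmetic series
# has a constant first difference, and its mean is sum(series) / len(series).
_demo_series = [2, 4, 6, 8]
assert all(b - a == _demo_series[1] - _demo_series[0] for a, b in zip(_demo_series, _demo_series[1:]))
assert sum(_demo_series) / len(_demo_series) == 5.0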
| 20 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 | 1 |
from collections import Counter
from timeit import timeit
def _lowercase( __a : str = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def _lowercase( __a : str = "" ):
if len(__a ) == 0:
return True
a__ =input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
a__ ={}
for character in lower_case_input_str:
a__ =character_freq_dict.get(__a , 0 ) + 1
a__ =0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
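# Both implementations above agree (illustrative):
# can_string_be_rearranged_as_palindrome_counter('Momo')  # True ('moom')
# can_string_be_rearranged_as_palindrome('an ana')        # True ('anana')
# can_string_be_rearranged_as_palindrome('abc')           # False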
def _lowercase( __a : str = "" ):
print('\nFor string = ' , __a , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(__a ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(__a ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
_lowerCAmelCase: Any = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
_lowerCAmelCase: Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 20 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old_key, new_key):
    val = dct.pop(old_key)
    dct[new_key] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
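# Illustrative mapping produced by create_rename_keys for one original key:
# 'network.0.0.pwconv1.weight' -> 'swiftformer.encoder.network.0.blocks.0.point_wise_conv1.weight'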
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
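# Example invocation (illustrative file name and paths):
# python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt <path-or-url>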
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowerCAmelCase: List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contains the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 20 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node, v_node, weight) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")


def test_vector() -> None:
    pass
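# Illustrative usage on a small 4-node graph:
# g = Graph(4)
# for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
#     g.add_edge(u, v, w)
# g.boruvka()  # prints each added edge and a total MST weight of 19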
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowercase_ (lowercase__ ):
snake_case =42
@flax_register_to_config
class lowercase_ (nn.Module , lowercase__ , lowercase__ ):
snake_case =32
snake_case =4
snake_case =4
snake_case =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
snake_case =("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
snake_case =False
snake_case =(320, 640, 1280, 1280)
snake_case =2
snake_case =8
snake_case =None
snake_case =1280
snake_case =0.0
snake_case =False
snake_case =jnp.floataa
snake_case =True
snake_case =0
snake_case =False
def __UpperCamelCase ( self , lowercase_) -> FrozenDict:
# init input tensors
a__ =(1, self.in_channels, self.sample_size, self.sample_size)
a__ =jnp.zeros(lowercase_ , dtype=jnp.floataa)
a__ =jnp.ones((1,) , dtype=jnp.intaa)
a__ =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
a__ , a__ =jax.random.split(lowercase_)
a__ ={'params': params_rng, 'dropout': dropout_rng}
return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_)["params"]
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.block_out_channels
a__ =block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.')
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a__ =self.num_attention_heads or self.attention_head_dim
# input
a__ =nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a__ =FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
a__ =FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype)
a__ =self.only_cross_attention
if isinstance(lowercase_ , lowercase_):
a__ =(only_cross_attention,) * len(self.down_block_types)
if isinstance(lowercase_ , lowercase_):
a__ =(num_attention_heads,) * len(self.down_block_types)
# down
a__ =[]
a__ =block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
a__ =output_channel
a__ =block_out_channels[i]
a__ =i == len(lowercase_) - 1
if down_block_type == "CrossAttnDownBlock2D":
a__ =FlaxCrossAttnDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a__ =FlaxDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase_)
a__ =down_blocks
# mid
a__ =FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
a__ =[]
a__ =list(reversed(lowercase_))
a__ =list(reversed(lowercase_))
a__ =list(reversed(lowercase_))
a__ =reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
a__ =output_channel
a__ =reversed_block_out_channels[i]
a__ =reversed_block_out_channels[min(i + 1 , len(lowercase_) - 1)]
a__ =i == len(lowercase_) - 1
if up_block_type == "CrossAttnUpBlock2D":
a__ =FlaxCrossAttnUpBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a__ =FlaxUpBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowercase_)
a__ =output_channel
a__ =up_blocks
# out
a__ =nn.GroupNorm(num_groups=32 , epsilon=1e-5)
a__ =nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_ = True , lowercase_ = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(lowercase_ , jnp.ndarray):
a__ =jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(lowercase_ , jnp.ndarray) and len(timesteps.shape) == 0:
a__ =timesteps.astype(dtype=jnp.floataa)
a__ =jnp.expand_dims(lowercase_ , 0)
a__ =self.time_proj(lowercase_)
a__ =self.time_embedding(lowercase_)
# 2. pre-process
a__ =jnp.transpose(lowercase_ , (0, 2, 3, 1))
a__ =self.conv_in(lowercase_)
# 3. down
a__ =(sample,)
for down_block in self.down_blocks:
if isinstance(lowercase_ , lowercase_):
a__ , a__ =down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train)
else:
a__ , a__ =down_block(lowercase_ , lowercase_ , deterministic=not train)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
a__ =()
for down_block_res_sample, down_block_additional_residual in zip(
lowercase_ , lowercase_):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
a__ =new_down_block_res_samples
# 4. mid
a__ =self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train)
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
a__ =down_block_res_samples[-(self.layers_per_block + 1) :]
a__ =down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowercase_ , lowercase_):
a__ =up_block(
lowercase_ , temb=lowercase_ , encoder_hidden_states=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train , )
else:
a__ =up_block(lowercase_ , temb=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train)
# 6. post-process
a__ =self.conv_norm_out(lowercase_)
a__ =nn.silu(lowercase_)
a__ =self.conv_out(lowercase_)
a__ =jnp.transpose(lowercase_ , (0, 3, 1, 2))
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowercase_)
| 20 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 20 | 1 |
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
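# Sanity checks (illustrative):
# is_pentagonal(22)  # True, P_4 = 22
# is_pentagonal(23)  # False
# solution() searches for the pentagonal pair whose sum and difference are both
# pentagonal and returns that minimised difference (Project Euler problem 44).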
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie = {}

    def insert_word(self, text) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))
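# Expected output of main() with the words inserted above; each completion carries
# a trailing space from the END marker, and order follows dict insertion order:
# ('depart ', 'detergent ', 'deer ', 'deal ')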
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size_divisor'))
        self.assertTrue(hasattr(image_processing, 'resample'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 20 |
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')
    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 20 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 1 |
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor step (explicit Euler), then trapezoidal corrector (Heun's method)
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
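# Quick check (illustrative): integrate y' = y from x = 0 to 1 with step 0.1;
# euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1]  # ~2.714, close to e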
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 1 |
from collections.abc import Iterable
from typing import Any
class lowercase_ :
def __init__( self , lowercase_ = None) -> Dict:
a__ =value
a__ =None # Added in order to delete a node easier
a__ =None
a__ =None
def __repr__( self) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value)
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1)
class lowercase_ :
    def __init__(self, root=None) -> None:
        self.root = root
def __str__( self) -> str:
return str(self.root)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
if new_children is not None: # reset its kids
a__ =node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowercase_): # If it is the right children
a__ =new_children
else:
a__ =new_children
else:
a__ =new_children
def __UpperCamelCase ( self , lowercase_) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __UpperCamelCase ( self) -> bool:
return self.root is None
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =Node(lowercase_) # create a new Node
if self.empty(): # if Tree is empty
a__ =new_node # set its root
else: # Tree is not empty
a__ =self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
a__ =new_node # We insert the new node in a leaf
break
else:
a__ =parent_node.left
else:
if parent_node.right is None:
a__ =new_node
break
else:
a__ =parent_node.right
a__ =parent_node
def __UpperCamelCase ( self , *lowercase_) -> None:
for value in values:
self.__insert(lowercase_)
def __UpperCamelCase ( self , lowercase_) -> Node | None:
if self.empty():
raise IndexError('Warning: Tree is empty! please use another.')
else:
a__ =self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
a__ =node.left if value < node.value else node.right
return node
def __UpperCamelCase ( self , lowercase_ = None) -> Node | None:
if node is None:
if self.root is None:
return None
a__ =self.root
if not self.empty():
while node.right is not None:
a__ =node.right
return node
def __UpperCamelCase ( self , lowercase_ = None) -> Node | None:
if node is None:
a__ =self.root
if self.root is None:
return None
if not self.empty():
a__ =self.root
while node.left is not None:
a__ =node.left
return node
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =self.search(lowercase_) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowercase_ , lowercase_)
elif node.left is None: # Has only right children
self.__reassign_nodes(lowercase_ , node.right)
elif node.right is None: # Has only left children
self.__reassign_nodes(lowercase_ , node.left)
else:
a__ =self.get_max(
node.left) # Gets the max value of the left branch
self.remove(tmp_node.value) # type: ignore
a__ =(
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __UpperCamelCase ( self , lowercase_) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left)
yield from self.preorder_traverse(node.right)
def __UpperCamelCase ( self , lowercase_=None) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root)
else:
return traversal_function(self.root)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
if node:
self.inorder(lowercase_ , node.left)
arr.append(node.value)
self.inorder(lowercase_ , node.right)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
a__ =[]
self.inorder(lowercase_ , lowercase_) # append all values to list using inorder traversal
return arr[k - 1]
def _lowercase( __a : Node | None ):
a__ =[]
if curr_node is not None:
a__ =postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _lowercase( ):
a__ =(8, 3, 6, 1, 10, 14, 13, 4, 7)
a__ =BinarySearchTree()
for i in testlist:
t.insert(__a )
# Prints all the elements of the list in order traversal
print(__a )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(__a )
print(__a )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 20 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
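# list_field gives each dataclass instance its own copy of the default list via
# default_factory, avoiding the shared-mutable-default pitfall. Illustrative:
# foo: List[int] = list_field(default=[1, 2, 3])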
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
        # A boolean no_* argument always has to come after its "default: True" regular counterpart,
        # and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
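# A minimal standalone sketch of the boolean CLI convention tested above:
# bare `--baz` sets True, `--no_baz` sets False, and explicit values such as
# `--baz False` are parsed through string_to_bool (imported at the top).
_demo_parser = argparse.ArgumentParser()
_demo_parser.add_argument('--baz', type=string_to_bool, default=True, const=True, nargs='?')
_demo_parser.add_argument('--no_baz', action='store_false', default=True, dest='baz')
assert _demo_parser.parse_args([]).baz is True
assert _demo_parser.parse_args(['--no_baz']).baz is False
assert _demo_parser.parse_args(['--baz', 'False']).baz is False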
| 20 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _lowercase( __a : ndarray ):
return np.dot(__a , __a )
class lowercase_ :
def __init__( self , *,
lowercase_ = np.inf , lowercase_ = "linear" , lowercase_ = 0.0 , ) -> None:
a__ =regularization
a__ =gamma
if kernel == "linear":
a__ =self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
a__ =self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
a__ =F"""Unknown kernel: {kernel}"""
raise ValueError(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> float:
return np.dot(lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =observations
a__ =classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((a__) , ) =np.shape(lowercase_)
def to_minimize(lowercase_) -> float:
a__ =0
((a__) , ) =np.shape(lowercase_)
for i in range(lowercase_):
for j in range(lowercase_):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(lowercase_)
a__ =LinearConstraint(lowercase_ , 0 , 0)
a__ =Bounds(0 , self.regularization)
a__ =minimize(
                lowercase_ , np.ones(lowercase_) , bounds=lowercase_ , constraints=[ly_constraint]).x
a__ =l_star
# calculating mean offset of separation plane to points
a__ =0
for i in range(lowercase_):
for j in range(lowercase_):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
a__ =s / n
def __UpperCamelCase ( self , lowercase_) -> int:
a__ =sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowercase_)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
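# A vectorized sketch of the dual objective minimized in `fit` above
# (assumption: kernel_matrix[i, j] == kernel(observations[i], observations[j])).
# It computes the same quantity as the nested loops in `to_minimize`:
#     1/2 * l^T (y y^T * K) l - sum(l)
def dual_objective(candidate: ndarray, classes: ndarray, kernel_matrix: ndarray) -> float:
    signed_kernel = np.outer(classes, classes) * kernel_matrix # y_i * y_j * K_ij
    return 0.5 * candidate @ signed_kernel @ candidate - candidate.sum()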
| 20 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
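# A worked example of the model input dimension implied by the defaults above
# (a sanity check, not part of the original file): with input_size=1,
# lags_sequence=[1, 2, 3, 4, 5, 6, 7], cardinality=[0] (so embedding_dimension
# sums to 0) and no dynamic/static/time features,
#     _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2
#     feature_size = input_size * len(lags_sequence) + _number_of_features
#                  = 1 * 7 + 2 = 9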
| 20 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase: Union[str, Any] = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: Any = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: Tuple = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: Optional[int] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: int = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
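# A minimal standalone sketch of the lazy-import pattern wired up above
# (hypothetical names; the real implementation is the `_LazyModule` imported at
# the top): attributes resolve against their submodule on first access, so
# heavy backends such as torch, tf or flax are only imported when actually used.
import importlib
import types
class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        submodule = self._attr_to_submodule[attr] # raises KeyError for unknown attributes
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value) # cache so __getattr__ is not hit again for this name
        return value
# e.g. sys.modules[__name__] = TinyLazyModule(__name__, _import_structure)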
| 20 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
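# A standalone sketch of the shortest-edge resizing used by `resize` above
# (assumption: this mirrors get_resize_output_image_size with
# default_to_square=False): the shorter side is scaled to `shortest_edge` and
# the longer side keeps the original aspect ratio.
def shortest_edge_output_size(height: int, width: int, shortest_edge: int) -> tuple:
    short, long = min(height, width), max(height, width)
    new_short, new_long = shortest_edge, int(shortest_edge * long / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)
assert shortest_edge_output_size(480, 640, 256) == (256, 341) # 640 * 256 / 480 = 341.33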
| 20 | 1 |
def _lowercase( __a : int = 5000_0000 ):
a__ =set()
a__ =int((limit - 24) ** (1 / 2) )
a__ =set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , __a ) ) )
for primea in primes:
a__ =primea * primea
for primea in primes:
a__ =primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
a__ =primea * primea * primea * primea
a__ =square + cube + tetr
if total >= limit:
break
ret.add(__a )
return len(__a )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
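# A minimal standalone sketch of the idea implemented above: temporarily swap
# out an attribute that was imported "as" a bare name, then restore it, which
# is what __enter__/__exit__ do for every matching global they find.
import os as _os
class _PatchDemo:
    join = _os.path.join # like `from os.path import join` at module level
_original_join = _PatchDemo.join
_PatchDemo.join = lambda *parts: '<patched>'
assert _PatchDemo.join('a', 'b') == '<patched>'
_PatchDemo.join = _original_join # restore, as __exit__ above does
assert _PatchDemo.join('a', 'b') == _os.path.join('a', 'b')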
| 20 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =StableDiffusionInstructPixaPixPipeline
snake_case =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
snake_case =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
a__ =PNDMScheduler(skip_prk_steps=lowercase_)
torch.manual_seed(0)
a__ =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
a__ =CLIPTextModel(lowercase_)
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
a__ ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Optional[Any]:
a__ =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_)).to(lowercase_)
a__ =image.cpu().permute(0 , 2 , 3 , 1)[0]
a__ =Image.fromarray(np.uinta(lowercase_)).convert('RGB')
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __UpperCamelCase ( self) -> Dict:
a__ ='cpu' # ensure determinism for the device-dependent torch.Generator
a__ =self.get_dummy_components()
a__ =StableDiffusionInstructPixaPixPipeline(**lowercase_)
a__ =sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
a__ =self.get_dummy_inputs(lowercase_)
a__ =sd_pipe(**lowercase_).images
a__ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a__ =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def __UpperCamelCase ( self) -> Optional[int]:
a__ ='cpu' # ensure determinism for the device-dependent torch.Generator
a__ =self.get_dummy_components()
a__ =StableDiffusionInstructPixaPixPipeline(**lowercase_)
a__ =sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
a__ =self.get_dummy_inputs(lowercase_)
a__ ='french fries'
a__ =sd_pipe(**lowercase_ , negative_prompt=lowercase_)
a__ =output.images
a__ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a__ =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def __UpperCamelCase ( self) -> str:
a__ ='cpu' # ensure determinism for the device-dependent torch.Generator
a__ =self.get_dummy_components()
a__ =StableDiffusionInstructPixaPixPipeline(**lowercase_)
a__ =sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
a__ =self.get_dummy_inputs(lowercase_)
a__ =[inputs['prompt']] * 2
a__ =np.array(inputs['image']).astype(np.floataa) / 2_55.0
a__ =torch.from_numpy(lowercase_).unsqueeze(0).to(lowercase_)
a__ =image / 2 + 0.5
a__ =image.permute(0 , 3 , 1 , 2)
a__ =image.repeat(2 , 1 , 1 , 1)
a__ =sd_pipe(**lowercase_).images
a__ =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
a__ =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ ='cpu' # ensure determinism for the device-dependent torch.Generator
a__ =self.get_dummy_components()
a__ =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear')
a__ =StableDiffusionInstructPixaPixPipeline(**lowercase_)
a__ =sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
a__ =self.get_dummy_inputs(lowercase_)
a__ =sd_pipe(**lowercase_).images
a__ =image[0, -3:, -3:, -1]
a__ =[round(lowercase_ , 4) for x in image_slice.flatten().tolist()]
print(','.join([str(lowercase_) for x in slice]))
assert image.shape == (1, 32, 32, 3)
a__ =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def __UpperCamelCase ( self) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =self.get_dummy_components()
a__ =StableDiffusionInstructPixaPixPipeline(**lowercase_)
a__ =VaeImageProcessor(do_resize=lowercase_ , do_normalize=lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs_by_type(lowercase_ , input_image_type='pt'))[0]
a__ =components['vae']
a__ =self.get_dummy_inputs_by_type(lowercase_ , input_image_type='pt')
for image_param in self.image_latents_params:
if image_param in inputs.keys():
a__ =vae.encode(inputs[image_param]).latent_dist.mode()
a__ =pipe(**lowercase_)[0]
a__ =np.abs(out - out_latents_inputs).max()
self.assertLess(lowercase_ , 1e-4 , 'passing latents as image input generate different result from passing image')
@slow
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self , lowercase_=0) -> Dict:
a__ =torch.manual_seed(lowercase_)
a__ =load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg')
a__ ={
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __UpperCamelCase ( self) -> str:
a__ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
a__ =self.get_inputs()
a__ =pipe(**lowercase_).images
a__ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
a__ =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def __UpperCamelCase ( self) -> str:
a__ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowercase_)
a__ =LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
a__ =self.get_inputs()
a__ =pipe(**lowercase_).images
a__ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
a__ =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowercase_)
a__ =DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
a__ =self.get_inputs()
a__ =pipe(**lowercase_).images
a__ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
a__ =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def __UpperCamelCase ( self) -> Tuple:
a__ =0
def callback_fn(lowercase_ , lowercase_ , lowercase_) -> None:
a__ =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
a__ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
a__ =latents[0, -3:, -3:, -1]
a__ =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
a__ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
a__ =latents[0, -3:, -3:, -1]
a__ =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
a__ =False
a__ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowercase_ , torch_dtype=torch.floataa)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
a__ =self.get_inputs()
pipe(**lowercase_ , callback=lowercase_ , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def __UpperCamelCase ( self) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a__ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowercase_ , torch_dtype=torch.floataa)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
a__ =self.get_inputs()
a__ =pipe(**lowercase_)
a__ =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
a__ =inputs['image'].resize((504, 504))
a__ ='timbrooks/instruct-pix2pix'
a__ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
a__ =pipe(**lowercase_)
a__ =output.images[0]
a__ =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
a__ =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
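# A minimal sketch of the regression-check pattern used throughout the tests
# above: freeze a tiny corner of the generated image and compare later runs
# against it within a numeric tolerance (the values here are placeholders).
_demo_image = np.zeros((1, 512, 512, 3), dtype=np.float32) # stand-in for pipe(**inputs).images
_demo_slice = _demo_image[0, -3:, -3:, -1].flatten()
_demo_expected = np.zeros(9, dtype=np.float32) # frozen reference values
assert np.abs(_demo_slice - _demo_expected).max() < 1e-3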
| 20 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowercase( __a : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
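# Example invocation (script name and all paths are placeholders), using only
# the flags defined above:
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./controlnet.ckpt \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-diffusers \
#       --to_safetensors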
| 20 | 1 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowercase( __a : NDArray[floataa] , __a : NDArray[floataa] , __a : list[int] , __a : int , ):
a__ , a__ =coefficient_matrix.shape
a__ , a__ =constant_matrix.shape
if rowsa != colsa:
a__ =f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(__a )
if colsa != 1:
a__ =f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(__a )
if rowsa != rowsa:
a__ =(
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(__a )
if len(__a ) != rowsa:
a__ =(
'Number of initial values must be equal to number of rows in coefficient '
f"""matrix but received {len(__a )} and {rowsa}"""
)
raise ValueError(__a )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
a__ =np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
a__ , a__ =table.shape
strictly_diagonally_dominant(__a )
# Iterates the whole matrix for given number of times
for _ in range(__a ):
a__ =[]
for row in range(__a ):
a__ =0
for col in range(__a ):
if col == row:
a__ =table[row][col]
elif col == cols - 1:
a__ =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
a__ =(temp + val) / denom
new_val.append(__a )
a__ =new_val
return [float(__a ) for i in new_val]
def _lowercase( __a : NDArray[floataa] ):
a__ , a__ =table.shape
a__ =True
for i in range(0 , __a ):
a__ =0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
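# A hand-checkable example of the Jacobi scheme above (a minimal sketch):
# solve 4x + y = 2 and x + 3y = -1 from (0, 0). The coefficient matrix is
# strictly diagonally dominant, so the iteration converges to the exact
# solution (7/11, -6/11).
_x = _y = 0.0
for _ in range(25):
    # both updates read the previous iterate (tuple assignment), as Jacobi requires
    _x, _y = (2.0 - _y) / 4.0, (-1.0 - _x) / 3.0
assert abs(_x - 7 / 11) < 1e-6 and abs(_y + 6 / 11) < 1e-6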
| 20 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase: Tuple = get_logger(__name__)
_lowerCAmelCase: List[str] = Path(__file__).parent / 'model_card_template.md'
_lowerCAmelCase: Any = uuida().hex
_lowerCAmelCase: List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowercase( __a : Union[Dict, str, None] = None ):
a__ =f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
def _lowercase( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ):
if token is None:
a__ =HfFolder.get_token()
if organization is None:
a__ =whoami(__a )['name']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def _lowercase( __a : Optional[str] , __a : Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
a__ =str(Path(__a ).as_posix() )
a__ =re.search(r'snapshots/([^/]+)/' , __a )
if search is None:
return None
a__ =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase: List[str] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_lowerCAmelCase: List[str] = os.path.join(hf_cache_home, 'diffusers')
def _lowercase( __a : Optional[str] = None , __a : Optional[str] = None ):
if new_cache_dir is None:
a__ =DIFFUSERS_CACHE
if old_cache_dir is None:
a__ =old_diffusers_cache
a__ =Path(__a ).expanduser()
a__ =Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a__ =new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase: Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase: int = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase: List[Any] = int(f.read())
except ValueError:
_lowerCAmelCase: Any = 0
if cache_version < 1:
_lowerCAmelCase: str = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase: Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _lowercase( __a : str , __a : Optional[str] = None ):
if variant is not None:
a__ =weights_name.split('.' )
a__ =splits[:-1] + [variant] + splits[-1:]
a__ ='.'.join(__a )
return weights_name
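# A standalone check of the variant-insertion logic above: the variant tag is
# spliced in just before the final extension.
def _add_variant_sketch(weights_name, variant):
    parts = weights_name.split('.')
    return '.'.join(parts[:-1] + [variant] + parts[-1:])
assert _add_variant_sketch('diffusion_pytorch_model.bin', 'fp16') == 'diffusion_pytorch_model.fp16.bin'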
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
                ' \nCheck out your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it won't
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def convert_weight_and_push( hidden_sizes : int , name : str , config : LevitConfig , save_directory : Path , push_to_hub : bool = True ):
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model =timm.create_model('levit_128s' , pretrained=True )
            else:
                from_model =timm.create_model('levit_128' , pretrained=True )
        if hidden_sizes == 192:
            from_model =timm.create_model('levit_192' , pretrained=True )
        if hidden_sizes == 256:
            from_model =timm.create_model('levit_256' , pretrained=True )
        if hidden_sizes == 384:
            from_model =timm.create_model('levit_384' , pretrained=True )
        from_model.eval()
    our_model =LevitForImageClassificationWithTeacher(config ).eval()
    huggingface_weights =OrderedDict()
    weights =from_model.state_dict()
    og_keys =list(from_model.state_dict().keys() )
    new_keys =list(our_model.state_dict().keys() )
    print(len(og_keys ) , len(new_keys ) )
    for i in range(len(og_keys ) ):
        huggingface_weights[new_keys[i]] =weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights )
    x =torch.randn((2, 3, 224, 224) )
    out1 =from_model(x )
    out2 =our_model(x ).logits
    assert torch.allclose(out1 , out2 ), "The model logits don't match the original one."
    checkpoint_name =name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor =LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ):
    filename ='imagenet-1k-id2label.json'
    num_labels =1000
    expected_shape =(1, num_labels)
    repo_id ='huggingface/label-files'
    idalabel =json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel ={int(k): v for k, v in idalabel.items()}
    labelaid ={v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig =partial(LevitConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    names_to_hidden_sizes ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
    help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 20 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: str = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys =[(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
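# Example mapping produced by create_rename_keys (hypothetical checkpoint key):
# 'blocks.0.norm1.weight' -> 'vit.encoder.layer.0.layernorm_before.weight';
# with base_model=True the 'vit.' prefix is stripped from the target keys.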
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix =''
        else:
            prefix ='vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight =state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias =state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] =in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] =in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] =in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] =in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] =in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] =in_proj_bias[-config.hidden_size :]
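# Note on the slicing above: timm stores the fused qkv projection as one
# matrix of shape (3 * hidden_size, hidden_size); rows [0, h) are the query,
# [h, 2h) the key and [2h, 3h) the value, which is exactly how the three
# slices are carved out.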
def remove_classification_head_( state_dict ):
    ignore_keys =['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val =dct.pop(old )
    dct[new] =val
def prepare_img( ):
    url ='http://images.cocodataset.org/val2017/000000039769.jpg'
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    config =ViTConfig()
    base_model =False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model =True
        config.patch_size =int(vit_name[-12:-10] )
        config.image_size =int(vit_name[-9:-6] )
    else:
        config.num_labels =1000
        repo_id ='huggingface/label-files'
        filename ='imagenet-1k-id2label.json'
        idalabel =json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel ={int(k): v for k, v in idalabel.items()}
        config.id2label =idalabel
        config.label2id ={v: k for k, v in idalabel.items()}
        config.patch_size =int(vit_name[-6:-4] )
        config.image_size =int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            config.hidden_size =192
            config.intermediate_size =768
            config.num_hidden_layers =12
            config.num_attention_heads =3
        elif vit_name[9:].startswith('small' ):
            config.hidden_size =384
            config.intermediate_size =1536
            config.num_hidden_layers =12
            config.num_attention_heads =6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            config.hidden_size =768
            config.intermediate_size =2304
            config.num_hidden_layers =8
            config.num_attention_heads =8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            config.hidden_size =1024
            config.intermediate_size =4096
            config.num_hidden_layers =24
            config.num_attention_heads =16
        elif vit_name[4:].startswith('huge' ):
            config.hidden_size =1280
            config.intermediate_size =5120
            config.num_hidden_layers =32
            config.num_attention_heads =16
    # load original model from timm
    timm_model =timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict =timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys =create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model =ViTModel(config ).eval()
    else:
        model =ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor =DeiTImageProcessor(size=config.image_size )
    else:
        image_processor =ViTImageProcessor(size=config.image_size )
    encoding =image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values =encoding['pixel_values']
    outputs =model(pixel_values )
    if base_model:
        timm_pooled_output =timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits =timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCAmelCase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 20 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowerCAmelCase: int = logging.get_logger(__name__)
_lowerCAmelCase: Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_lowerCAmelCase: Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict( filename ):
    result ={}
    with open(filename , 'r' ) as file:
        for line_number, line in enumerate(file ):
            line =line.strip()
            if line:
                words =line.split()
                key =line_number
                value =words[0]
                result[key] =value
    return result
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer =getattr(hf_pointer , attribute )
    hf_param_name =None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name =PARAM_MAPPING[full_name.split('.' )[-1]]
            weight_type ='param'
    if weight_type is not None and weight_type != "param":
        hf_shape =getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer =hf_pointer
        for attribute in hf_param_name.split('.' ):
            shape_pointer =getattr(shape_pointer , attribute )
        hf_shape =shape_pointer.shape
        # let's reduce dimension
        value =value[0]
    else:
        hf_shape =hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data =value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data =value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data =value
    elif weight_type == "bias":
        hf_pointer.bias.data =value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.' ):
            hf_pointer =getattr(hf_pointer , attribute )
        hf_pointer.data =value
    else:
        hf_pointer.data =value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    hf_param_name =None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name =PARAM_MAPPING[full_name.split('.' )[-1]]
            weight_type ='param'
    if weight_type is not None and weight_type != "param":
        full_key ='.'.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key ='.'.join([key, hf_param_name] )
    else:
        full_key =key
    hf_dict[full_key] =value if 'lm_head' in full_key else value[0]
_lowerCAmelCase: Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
    is_used =False
    for key, mapped_key in MAPPING.items():
        mapped_key ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
            is_used =True
            if "*" in mapped_key:
                layer_index =name.split(key )[0].split('.' )[-2]
                mapped_key =mapped_key.replace('*' , layer_index )
            if "weight_g" in name:
                weight_type ='weight_g'
            elif "weight_v" in name:
                weight_type ='weight_v'
            elif "bias" in name:
                weight_type ='bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type ='weight'
            else:
                weight_type =None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    unused_weights =[]
    fairseq_dict =fairseq_model.state_dict()
    feature_extractor =hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used =False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used =True
        else:
            is_used =load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name =full_name.split('conv_layers.' )[-1]
    items =name.split('.' )
    layer_id =int(items[0] )
    type_id =int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data =value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data =value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data =value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data =value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
if config_path is not None:
a__ =WavaVecaConfig.from_pretrained(__a )
else:
a__ =WavaVecaConfig()
if is_seq_class:
a__ =read_txt_into_dict(__a )
a__ =idalabel
a__ =WavaVecaForSequenceClassification(__a )
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaForCTC(__a )
else:
a__ =WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_lowerCAmelCase: Tuple = parser.parse_args()
_lowerCAmelCase: Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 20 | 1 |
from maths.prime_check import is_prime
def twin_prime( number: int ):
    if not isinstance(number , int ):
        msg =f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
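# Examples: twin_prime(5) returns 7 because 5 and 7 are both prime, while
# twin_prime(6) returns -1 since 6 itself is not prime.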
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
a__ =-(labels.shape[-1] * loss.item())
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 1 |
_lowerCAmelCase: Tuple = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation: str ):
    operators ={'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack =Stack()
    operator_stack =Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr =operator_stack.peek()
            operator_stack.pop()
            num1 =operand_stack.peek()
            operand_stack.pop()
            num2 =operand_stack.peek()
            operand_stack.pop()
            total =operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
_lowerCAmelCase: Dict = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> int:
a__ =tempfile.mkdtemp()
a__ =BlipImageProcessor()
a__ =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
a__ =BlipProcessor(lowercase_ , lowercase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer
def __UpperCamelCase ( self , **lowercase_) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor
def __UpperCamelCase ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> str:
a__ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: Tuple = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase_ (lowercase__ ):
snake_case ='yolos'
def __init__( self , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=[512, 864] , lowercase_=16 , lowercase_=3 , lowercase_=True , lowercase_=100 , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> List[str]:
super().__init__(**lowercase_)
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =layer_norm_eps
a__ =image_size
a__ =patch_size
a__ =num_channels
a__ =qkv_bias
a__ =num_detection_tokens
a__ =use_mid_position_embeddings
a__ =auxiliary_loss
# Hungarian matcher
a__ =class_cost
a__ =bbox_cost
a__ =giou_cost
# Loss coefficients
a__ =bbox_loss_coefficient
a__ =giou_loss_coefficient
a__ =eos_coefficient
class lowercase_ (lowercase__ ):
snake_case =version.parse('1.11' )
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self) -> float:
return 1e-4
@property
def __UpperCamelCase ( self) -> int:
return 12
| 20 |
def exchange_sort( numbers: list[int] ):
    n =len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] =numbers[i], numbers[j]
    return numbers
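# Example: exchange_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]. The nested
# loops compare every pair once, so the sort runs in O(n^2) time and mutates
# the list in place.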
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 20 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> Tuple:
debug_launcher(test_script.main)
def __UpperCamelCase ( self) -> int:
debug_launcher(test_ops.main)
| 20 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 1 |
def catalan_numbers( upper_limit: int ):
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list =[0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] =1
    if upper_limit > 0:
        catalan_list[1] =1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
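# Example: catalan_numbers(5) returns [1, 1, 2, 5, 14, 42]; each C(i) is
# filled in from the recurrence above, for O(n^2) work overall.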
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
_lowerCAmelCase: str = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 20 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 | 1 |
import os
import sys
import unittest
_lowerCAmelCase: int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowerCAmelCase: int = os.path.join(git_repo_path, 'src', 'transformers')
_lowerCAmelCase: int = '\n{0} = None\n'
_lowerCAmelCase: List[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
_lowerCAmelCase: Union[str, Any] = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> str:
a__ =find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
self.assertIsNone(lowercase_)
a__ =find_backend(' if not is_tokenizers_available():')
self.assertEqual(lowercase_ , 'tokenizers')
a__ =find_backend(' if not is_tensorflow_text_available():')
self.assertEqual(lowercase_ , 'tensorflow_text')
a__ =find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):')
self.assertEqual(lowercase_ , 'sentencepiece_and_tokenizers')
a__ =find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
self.assertEqual(lowercase_ , 'sentencepiece_and_tensorflow_text')
a__ =find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
self.assertEqual(lowercase_ , 'sentencepiece_and_tokenizers_and_vision')
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , lowercase_)
self.assertIn('tensorflow_text' , lowercase_)
self.assertIn('sentencepiece_and_tokenizers' , lowercase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'])
self.assertIn('TFBertModel' , objects['tf'])
self.assertIn('FlaxBertModel' , objects['flax'])
self.assertIn('BertModel' , objects['torch'])
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'])
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'])
def __UpperCamelCase ( self) -> List[str]:
a__ =create_dummy_object('CONSTANT' , '\'torch\'')
self.assertEqual(lowercase_ , '\nCONSTANT = None\n')
a__ =create_dummy_object('function' , '\'torch\'')
self.assertEqual(
lowercase_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
a__ ='\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
a__ =create_dummy_object('FakeClass' , '\'torch\'')
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ ='# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
a__ =create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
self.assertEqual(dummy_files['torch'] , lowercase_)
| 20 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img( ):
    url ='http://images.cocodataset.org/val2017/000000039769.jpg'
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key( dct , old , new ):
    val =dct.pop(old )
    dct[new] =val
def create_rename_keys( state_dict ):
    rename_keys =[]
    for k in state_dict.keys():
        k_new =k
        if ".pwconv" in k:
            k_new =k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new =k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new =k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls =k_new.split('.' )
            if ls[2].isdigit():
                k_new ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new =k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
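# Example rewrite performed above (hypothetical checkpoint key):
# 'network.0.0.pwconv1.weight'
#   -> 'swiftformer.encoder.network.0.blocks.0.point_wise_conv1.weight'
# ('.pwconv' is expanded first; the 'network' prefix is then remapped because
# the third path segment is a digit).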
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    config =SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels =1000
    repo_id ='huggingface/label-files'
    filename ='imagenet-1k-id2label.json'
    idalabel =json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel ={int(k): v for k, v in idalabel.items()}
    config.id2label =idalabel
    config.label2id ={v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths =[3, 3, 6, 4]
        config.embed_dims =[48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths =[3, 3, 9, 6]
        config.embed_dims =[48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths =[4, 3, 10, 5]
        config.embed_dims =[48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths =[4, 4, 12, 6]
        config.embed_dims =[64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint =torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint =torch.load(original_ckpt , map_location='cpu' )
    state_dict =checkpoint
    rename_keys =create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model =SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image =prepare_img()
    processor =ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs =processor(images=image , return_tensors='pt' )
    # compare outputs from both models
    timm_logits =get_expected_output(swiftformer_name )
    hf_logits =hf_model(inputs['pixel_values'] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swiftformer_name',
        default='swiftformer_xs',
        choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
        type=str,
        help='Name of the SwiftFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='./converted_outputs/',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
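# Example invocation (a sketch; the script filename and checkpoint path below are
# assumptions, not taken from this file):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth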
| 20 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def __UpperCamelCase ( self) -> str:
a__ ='<pad>'
a__ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-4] , 'œ')
self.assertEqual(vocab_keys[-2] , '<mask>')
self.assertEqual(vocab_keys[-1] , '<ctc_blank>')
self.assertEqual(len(lowercase_) , 81)
def __UpperCamelCase ( self) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 79)
def __UpperCamelCase ( self) -> str:
a__ =self.get_tokenizers(do_lower_case=lowercase_)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
a__ =tokenizer.vocab_size
a__ =len(lowercase_)
self.assertNotEqual(lowercase_ , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a__ =['aaaaa bbbbbb', 'cccccccccdddddddd']
a__ =tokenizer.add_tokens(lowercase_)
a__ =tokenizer.vocab_size
a__ =len(lowercase_)
self.assertNotEqual(lowercase_ , 0)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , len(lowercase_))
self.assertEqual(lowercase_ , all_size + len(lowercase_))
a__ =tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase_)
self.assertGreaterEqual(len(lowercase_) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
a__ ={'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
a__ =tokenizer.add_special_tokens(lowercase_)
a__ =tokenizer.vocab_size
a__ =len(lowercase_)
self.assertNotEqual(lowercase_ , 0)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , len(lowercase_))
self.assertEqual(lowercase_ , all_size_a + len(lowercase_))
a__ =tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase_)
self.assertGreaterEqual(len(lowercase_) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.get_tokenizer()
a__ =tokenizer.tokenize('This is a test')
# fmt: off
self.assertListEqual(lowercase_ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
a__ =tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
a__ =tokenizer.convert_tokens_to_ids(lowercase_)
# fmt: off
self.assertListEqual(lowercase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
a__ =tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
def __UpperCamelCase ( self) -> Optional[int]:
# Use custom sequence because this tokenizer does not handle numbers.
a__ =[
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
a__ ={
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=lowercase_ , )
| 20 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
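    # Demo sketch (not in the original file): a small four-node graph whose
    # minimum spanning tree has total weight 19.
    graph = Graph(4)
    graph.add_edge(0, 1, 10)
    graph.add_edge(0, 2, 6)
    graph.add_edge(0, 3, 5)
    graph.add_edge(1, 3, 15)
    graph.add_edge(2, 3, 4)
    graph.boruvka()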
| 20 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts line up one-to-one, so the weights are copied by position.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
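# Example invocations (a sketch; the script filename is an assumption):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S --push_to_hub
#   python convert_levit_timm_to_pytorch.py --no-push_to_hub   # convert every variant locally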
| 20 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/ROUGE_(metric)',
                'https://github.com/google-research/google-research/tree/master/rouge',
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
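# Usage sketch (mirrors the examples in _KWARGS_DESCRIPTION above):
#   rouge = datasets.load_metric('rouge')
#   results = rouge.compute(predictions=['hello there'], references=['hello there'])
#   print(results['rouge1'].mid.fmeasure)  # -> 1.0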
| 20 | 1 |
def solution(n: int = 200_0000) -> int:
    # Sieve of Eratosthenes: mark composites with 1, then sum the indices left at 0.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
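# Sanity check: the primes below 10 are 2, 3, 5 and 7, so solution(10) == 17.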
| 20 |
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
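# Expected output of main() with the word list above (a sketch; each completion keeps
# the trailing space produced by the END marker):
#   ('depart ', 'detergent ', 'deer ', 'deal ')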
| 20 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
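# Sketch of how a concrete test case would plug into this mixin (the class and
# parameter names below are hypothetical):
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       feat_extract_dict = {'feature_size': 80, 'sampling_rate': 16000}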
| 20 |
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
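# Classic Vigenère test vector (a sketch, verified by hand):
#   encrypt_message('LEMON', 'ATTACKATDAWN')  # -> 'LXFOPVEFRNHR'
#   decrypt_message('LEMON', 'LXFOPVEFRNHR')  # -> 'ATTACKATDAWN'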
| 20 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=2 , lowercase_=True , lowercase_=False , lowercase_=10 , lowercase_=3 , lowercase_=32 * 4 , lowercase_=32 * 6 , lowercase_=4 , lowercase_=32 , ) -> Union[str, Any]:
a__ =parent
a__ =batch_size
a__ =is_training
a__ =use_auxiliary_loss
a__ =num_queries
a__ =num_channels
a__ =min_size
a__ =max_size
a__ =num_labels
a__ =mask_feature_size
def __UpperCamelCase ( self) -> int:
a__ =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
lowercase_)
a__ =torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_)
a__ =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_) > 0.5
).float()
a__ =(torch.rand((self.batch_size, self.num_labels) , device=lowercase_) > 0.5).long()
a__ =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __UpperCamelCase ( self) -> str:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __UpperCamelCase ( self) -> Tuple:
a__ , a__ , a__ , a__ , a__ =self.prepare_config_and_inputs()
a__ ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =output.encoder_hidden_states
a__ =output.pixel_decoder_hidden_states
a__ =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowercase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowercase_) , config.decoder_config.decoder_layers)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False) -> List[str]:
with torch.no_grad():
a__ =MaskFormerModel(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(pixel_values=lowercase_ , pixel_mask=lowercase_)
a__ =model(lowercase_ , output_hidden_states=lowercase_)
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
a__ =MaskFormerForInstanceSegmentation(config=lowercase_)
model.to(lowercase_)
model.eval()
def comm_check_on_output(lowercase_):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
a__ =model(pixel_values=lowercase_ , pixel_mask=lowercase_)
a__ =model(lowercase_)
comm_check_on_output(lowercase_)
a__ =model(
pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_)
comm_check_on_output(lowercase_)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case =(
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =MaskFormerModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> List[str]:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowercase_)
@unittest.skip(reason='MaskFormer does not use inputs_embeds')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip(reason='MaskFormer is not a generative model')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings')
def __UpperCamelCase ( self) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Tuple:
pass
def __UpperCamelCase ( self) -> List[Any]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
@slow
def __UpperCamelCase ( self) -> Any:
for model_name in ["facebook/maskformer-swin-small-coco"]:
a__ =MaskFormerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =(self.model_tester.min_size,) * 2
a__ ={
'pixel_values': torch.randn((2, 3, *size) , device=lowercase_),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase_),
'class_labels': torch.zeros(2 , 10 , device=lowercase_).long(),
}
a__ =MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(lowercase_)
a__ =model(**lowercase_)
self.assertTrue(outputs.loss is not None)
def __UpperCamelCase ( self) -> List[Any]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_)
def __UpperCamelCase ( self) -> int:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_).to(lowercase_)
a__ =model(**lowercase_ , output_attentions=lowercase_)
self.assertTrue(outputs.attentions is not None)
def __UpperCamelCase ( self) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
a__ =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
a__ =model_class(lowercase_)
model.to(lowercase_)
model.train()
a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_).loss
loss.backward()
def __UpperCamelCase ( self) -> Union[str, Any]:
# only MaskFormerForInstanceSegmentation has the loss
a__ =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
a__ =True
a__ =True
a__ =model_class(lowercase_)
model.to(lowercase_)
model.train()
a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_)
a__ =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a__ =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
a__ =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a__ =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class lowercase_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self) -> Tuple:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(lowercase_)
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_)
a__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088))
with torch.no_grad():
a__ =model(**lowercase_)
a__ =torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
a__ =torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
a__ =torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_))
def __UpperCamelCase ( self) -> Dict:
a__ =(
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(lowercase_)
.eval()
)
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_)
a__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088))
with torch.no_grad():
a__ =model(**lowercase_)
# masks_queries_logits
a__ =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a__ =[
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
a__ =torch.tensor(lowercase_).to(lowercase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
# class_queries_logits
a__ =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a__ =torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_))
def __UpperCamelCase ( self) -> List[str]:
a__ =(
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(lowercase_)
.eval()
)
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_)
a__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088))
with torch.no_grad():
a__ =model(**lowercase_)
# masks_queries_logits
a__ =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a__ =[[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
a__ =torch.tensor(lowercase_).to(lowercase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
# class_queries_logits
a__ =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a__ =torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =(
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(lowercase_)
.eval()
)
a__ =self.default_image_processor
a__ =image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='pt' , )
a__ =inputs['pixel_values'].to(lowercase_)
a__ =[el.to(lowercase_) for el in inputs['mask_labels']]
a__ =[el.to(lowercase_) for el in inputs['class_labels']]
with torch.no_grad():
a__ =model(**lowercase_)
self.assertTrue(outputs.loss is not None)
| 20 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
    from .deepspeed import (
        DeepSpeedEngineWrapper,
        DeepSpeedOptimizerWrapper,
        DeepSpeedSchedulerWrapper,
        DummyOptim,
        DummyScheduler,
        HfDeepSpeedConfig,
    )

from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
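# These re-exports let downstream code import helpers from one place, e.g.
# (a usage sketch):
#   from accelerate.utils import set_seed, send_to_device
#   set_seed(42)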
| 20 | 1 |
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
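# Quick checks (a sketch): is_palindrome(121) -> True, is_palindrome(-121) -> False,
# is_palindrome(10) -> False.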
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy

logger = logging.getLogger(__name__)
def _lowercase( __a : torch.nn.Module , __a : BnbQuantizationConfig , __a : Union[str, os.PathLike] = None , __a : Optional[Dict[str, Union[int, str, torch.device]]] = None , __a : Optional[List[str]] = None , __a : Optional[Dict[Union[int, str], Union[int, str]]] = None , __a : Optional[Union[str, os.PathLike]] = None , __a : bool = False , ):
a__ =bnb_quantization_config.load_in_abit
a__ =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
a__ =[]
# custom device map
if isinstance(__a , __a ) and len(device_map.keys() ) > 1:
a__ =[key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a__ =get_keys_to_not_convert(__a )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__a )
a__ =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a__ =[]
a__ =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__a )
# compatibility with peft
a__ =load_in_abit
a__ =load_in_abit
a__ =get_parameter_device(__a )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
a__ =replace_with_bnb_layers(__a , __a , modules_to_not_convert=__a )
# convert param to the right dtype
a__ =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a__ =name.replace('.weight' , '' ).replace('.bias' , '' )
a__ =getattr(__a , __a , __a )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__a ):
param.to(__a )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
a__ =replace_with_bnb_layers(
__a , __a , modules_to_not_convert=__a )
a__ =get_quantized_model_device_map(
__a , __a , __a , max_memory=__a , no_split_module_classes=__a , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a__ =True
a__ =any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
            __a , __a , __a , dtype=bnb_quantization_config.torch_dtype , offload_folder=__a , offload_state_dict=__a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_8bit and offload , )
return dispatch_model(__a , device_map=__a , offload_dir=__a )
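# A minimal usage sketch for the entry point above, assuming the public
# accelerate API that this module mirrors (`BnbQuantizationConfig` and
# `load_and_quantize_model`); `MyModel`, `my_config` and the checkpoint path
# are hypothetical placeholders:
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     with init_empty_weights():
#         empty_model = MyModel(my_config)  # hypothetical model class
#     quantized_model = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location='path/to/checkpoint',  # folder holding the weights
#         device_map='auto',
#     )
#
# The helper below resolves the final device map: it validates string presets,
# pins skip and fp32 modules to their original dtypes, and rejects device maps
# that would put quantized 4-bit modules on CPU or disk.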
def get_quantized_model_device_map( __a : List[Any] , __a : Tuple , __a : Optional[int]=None , __a : Tuple=None , __a : Optional[Any]=None ):
if device_map is None:
if torch.cuda.is_available():
a__ ={'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__a , __a ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
a__ ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
            name: torch.float32
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
a__ ={}
a__ =special_dtypes
a__ =no_split_module_classes
a__ =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a__ =get_balanced_memory(
__a , low_zero=(device_map == 'balanced_low_0') , max_memory=__a , **__a , )
a__ =max_memory
a__ =infer_auto_device_map(__a , **__a )
if isinstance(__a , __a ):
# check if don't have any quantized module on the cpu
a__ =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a__ ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit.' )
del device_map_without_some_modules
return device_map
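# Swaps every eligible nn.Linear in the model for a bitsandbytes layer and
# warns when nothing was replaced (e.g. architectures such as gpt2 that use
# Conv1D instead of nn.Linear).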
def replace_with_bnb_layers( __a : List[str] , __a : int , __a : Optional[Any]=None , __a : List[str]=None ):
if modules_to_not_convert is None:
a__ =[]
a__ , a__ =_replace_with_bnb_layers(
__a , __a , __a , __a )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
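# Recursive worker: walks model.named_children(), replacing nn.Linear modules
# with bnb.nn.Linear8bitLt / bnb.nn.Linear4bit unless the module's dotted name
# matches an entry in `modules_to_not_convert`.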
def _replace_with_bnb_layers( __a : Dict , __a : List[str] , __a : int=None , __a : Union[str, Any]=None , ):
a__ =False
for name, module in model.named_children():
if current_key_name is None:
a__ =[]
current_key_name.append(__a )
if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a__ ='.'.join(__a )
a__ =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a__ =False
break
if proceed:
                # Load bnb module with empty weight and replace `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    a__ =bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=__a , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    a__ =bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t both be False' )
a__ =module.weight.data
if module.bias is not None:
a__ =module.bias.data
bnb_module.requires_grad_(__a )
setattr(__a , __a , __a )
a__ =True
if len(list(module.children() ) ) > 0:
a__ , a__ =_replace_with_bnb_layers(
__a , __a , __a , __a )
a__ =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
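# Returns the module names to keep in full precision: tied parameters plus the
# last child module (the head) of non-base models, with '.weight' / '.bias'
# suffixes stripped from the collected names.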
def get_keys_to_not_convert( __a : Optional[int] ):
# Create a copy of the model
with init_empty_weights():
        a__ =deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager
a__ =find_tied_parameters(__a )
# For compatibility with Accelerate < 0.18
if isinstance(__a , __a ):
a__ =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
a__ =sum(__a , [] )
a__ =len(__a ) > 0
# Check if it is a base model
a__ =False
if hasattr(__a , 'base_model_prefix' ):
a__ =not hasattr(__a , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a__ =list(model.named_children() )
a__ =[list_modules[-1][0]]
# add last module together with tied weights
a__ =set(__a ) - set(__a )
a__ =list(set(__a ) ) + list(__a )
# remove ".weight" from the keys
a__ =['.weight', '.bias']
a__ =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a__ =name.replace(__a , '' )
filtered_module_names.append(__a )
return filtered_module_names
def has_4bit_bnb_layers( __a : List[str] ):
for m in model.modules():
        if isinstance(__a , bnb.nn.Linear4bit ):
return True
return False
def get_parameter_device( __a : nn.Module ):
return next(parameter.parameters() ).device
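# Quantizes/offloads a single parameter: materializes the tensor on GPU when
# no fp16 statistics are given, walks the dotted parameter name to the owning
# submodule, writes the weight (and its SCB quantization statistics, when
# present) to the offload index, then frees the slot by pointing the parameter
# at the meta device.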
def quantize_and_offload( __a : Optional[Any] , __a : Any , __a : Tuple , __a : Any , __a : Optional[Any] , __a : int , __a : List[Any] ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__a , __a , 0 , dtype=__a , value=__a )
a__ =param_name
a__ =model
if "." in tensor_name:
a__ =tensor_name.split('.' )
for split in splits[:-1]:
a__ =getattr(__a , __a )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
a__ =new_module
a__ =splits[-1]
# offload weights
a__ =False
offload_weight(module._parameters[tensor_name] , __a , __a , index=__a )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __a , index=__a , )
else:
offload_weight(__a , __a , __a , index=__a )
offload_weight(__a , param_name.replace('weight' , 'SCB' ) , __a , index=__a )
set_module_tensor_to_device(__a , __a , 'meta' , dtype=__a , value=torch.empty(*param.size() ) )
| 20 |
from manim import *
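# Manim scene: draws CPU, GPU and model memory blocks, then animates the
# shards of a loaded checkpoint filling the CPU memory slots one rectangle at
# a time.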
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
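# Model tester: builds a tiny EsmConfig with is_folding_model=True and checks
# that EsmForProteinFolding returns positions of shape
# (8, batch_size, seq_length, 14, 3) and angles of shape
# (8, batch_size, seq_length, 7, 2).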
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=19 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> int:
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_mask
a__ =use_token_type_ids
a__ =use_labels
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_labels
a__ =num_choices
a__ =scope
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ =None
if self.use_input_mask:
a__ =random_attention_mask([self.batch_size, self.seq_length])
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__ =ids_tensor([self.batch_size] , self.num_choices)
a__ =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self) -> Dict:
a__ =EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowercase_ , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> int:
a__ =EsmForProteinFolding(config=lowercase_).float()
model.to(lowercase_)
model.eval()
a__ =model(lowercase_ , attention_mask=lowercase_)
a__ =model(lowercase_)
a__ =model(lowercase_)
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3))
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
        a__ , a__ , a__ , a__ , a__ , a__ =config_and_inputs
a__ ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =False
snake_case =(EsmForProteinFolding,) if is_torch_available() else ()
snake_case =()
snake_case ={} if is_torch_available() else {}
snake_case =False
def __UpperCamelCase ( self) -> Dict:
a__ =EsmFoldModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCamelCase ( self) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
@unittest.skip('Does not support attention outputs')
def __UpperCamelCase ( self) -> Tuple:
pass
@unittest.skip
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Esm does not support embedding resizing')
def __UpperCamelCase ( self) -> Tuple:
pass
@unittest.skip('Esm does not support embedding resizing')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('ESMFold does not support passing input embeds!')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.')
def __UpperCamelCase ( self) -> Any:
pass
    @unittest.skip('ESMFold does not output hidden states in the normal way.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('ESMFold only has one output format.')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('ESMFold does not support input chunking.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> List[str]:
pass
@require_torch
class lowercase_ (lowercase__ ):
@slow
def __UpperCamelCase ( self) -> Dict:
a__ =EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
model.eval()
a__ =torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
a__ =model(lowercase_)['positions']
        a__ =torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.float32)
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowercase_ , atol=1e-4))
| 20 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase: Any = sys.version_info >= (3, 10)
def list_field( __a : int=None , __a : Any=None ):
return field(default_factory=lambda: default , metadata=__a )
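# `list_field` wraps dataclasses.field with a list default_factory so that a
# mutable list default is safe to share across dataclass instances. A small
# usage sketch (the `Args` dataclass is a hypothetical example):
#
#     @dataclass
#     class Args:
#         names: List[str] = list_field(default=['a', 'b'])
#
#     parser = HfArgumentParser(Args)
#     args = parser.parse_args_into_dataclasses(['--names', 'x', 'y'])[0]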
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
            a__ =parser.parse_json_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
| 20 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowerCAmelCase: int = logging.get_logger(__name__)
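# PyTorch-specific benchmark arguments: translates deprecated `no_*` flags
# into their positive counterparts at construction time and resolves the
# torch device and GPU count lazily via `_setup_devices`.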
@dataclass
class lowercase_ (lowercase__ ):
snake_case =[
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **lowercase_) -> Dict:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
a__ =deprecated_arg[3:]
setattr(self , lowercase_ , not kwargs.pop(lowercase_))
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""")
a__ =kwargs.pop('torchscript' , self.torchscript)
a__ =kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics)
a__ =kwargs.pop('fp16_opt_level' , self.fpaa_opt_level)
super().__init__(**lowercase_)
snake_case =field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
snake_case =field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
snake_case =field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def __UpperCamelCase ( self) -> Tuple["torch.device", int]:
requires_backends(self , ['torch'])
logger.info('PyTorch: setting up devices')
if not self.cuda:
a__ =torch.device('cpu')
a__ =0
elif is_torch_tpu_available():
a__ =xm.xla_device()
a__ =0
else:
a__ =torch.device('cuda' if torch.cuda.is_available() else 'cpu')
a__ =torch.cuda.device_count()
return device, n_gpu
@property
def __UpperCamelCase ( self) -> List[Any]:
return is_torch_tpu_available() and self.tpu
@property
def __UpperCamelCase ( self) -> int:
requires_backends(self , ['torch'])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __UpperCamelCase ( self) -> "torch.device":
requires_backends(self , ['torch'])
return self._setup_devices[0]
@property
def __UpperCamelCase ( self) -> List[str]:
requires_backends(self , ['torch'])
return self._setup_devices[1]
@property
def __UpperCamelCase ( self) -> List[str]:
return self.n_gpu > 0
| 20 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
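# Configuration for the Autoformer time-series transformer. `context_length`
# defaults to `prediction_length`, and the encoder/decoder input size is
# input_size * len(lags_sequence) plus the number of extra features computed
# in `_number_of_features`.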
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 20 | 1 |